| field | dtype | min | max | notes |
|---|---|---|---|---|
| blob_id | stringlengths | 40 | 40 | |
| directory_id | stringlengths | 40 | 40 | |
| path | stringlengths | 3 | 616 | |
| content_id | stringlengths | 40 | 40 | |
| detected_licenses | listlengths | 0 | 112 | |
| license_type | stringclasses | | | 2 values |
| repo_name | stringlengths | 5 | 115 | |
| snapshot_id | stringlengths | 40 | 40 | |
| revision_id | stringlengths | 40 | 40 | |
| branch_name | stringclasses | | | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 | 2023-09-06 10:44:38 | |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 | 2037-05-03 13:00:00 | |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 | 2023-09-06 01:08:06 | |
| github_id | int64 | 4.92k | 681M | nullable (⌀) |
| star_events_count | int64 | 0 | 209k | |
| fork_events_count | int64 | 0 | 110k | |
| gha_license_id | stringclasses | | | 22 values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 | 2023-09-14 21:59:50 | nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 | 2023-08-21 12:35:19 | nullable (⌀) |
| gha_language | stringclasses | | | 149 values |
| src_encoding | stringclasses | | | 26 values |
| language | stringclasses | | | 1 value |
| is_vendor | bool | | | 2 classes |
| is_generated | bool | | | 2 classes |
| length_bytes | int64 | 3 | 10.2M | |
| extension | stringclasses | | | 188 values |
| content | stringlengths | 3 | 10.2M | |
| authors | listlengths | 1 | 1 | |
| author_id | stringlengths | 1 | 132 | |
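A minimal sketch of loading rows with this schema via the Hugging Face
`datasets` library (the hub path "user/the-stack-subset" is a placeholder;
the real dataset name is not given in this dump):

from datasets import load_dataset

ds = load_dataset("user/the-stack-subset", split="train", streaming=True)
for row in ds.take(1):
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])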
1f0fd884caa02d3685d91a38f543a61d4f4c35fd | 312a8fde11293cb142334a3860966ec1f75ac401 | /timesketch/lib/analyzers/contrib/__init__.py | 7c0d5d88fdd9c75789919c74eb42737eae4341a4 | [
"Apache-2.0"
] | permissive | google/timesketch | f0fd09062a8a24bac581d2d4286d095d667d2f10 | 24f471b58ca4a87cb053961b5f05c07a544ca7b8 | refs/heads/master | 2023-08-31T21:48:19.602686 | 2023-08-31T11:24:17 | 2023-08-31T11:24:17 | 21,009,909 | 2,263 | 647 | Apache-2.0 | 2023-09-14T14:08:07 | 2014-06-19T17:49:45 | Python | UTF-8 | Python | false | false | 813 | py | # Copyright 2022 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contrib Analyzer module."""
from timesketch.lib.analyzers.contrib import bigquery_matcher
from timesketch.lib.analyzers.contrib import misp_analyzer
from timesketch.lib.analyzers.contrib import hashlookup_analyzer
| [
"[email protected]"
] | |
abe8566f63107cd5f40e1645166a83765b1cfed1 | f6b1db8c0503a292f6a1da31800269e0bb5f39bd | /web_flask/5-number_template.py | 431a96556f0e1ae94db91f4d1e262f0713d039f8 | [] | no_license | arleybri18/AirBnB_clone_v2 | 142883fde2629c7eb75dddc8e4375a9ca1714555 | 111cabf15cadba09f018b2fe359eec68495035dc | refs/heads/master | 2020-07-07T03:44:31.456739 | 2019-09-09T15:16:55 | 2019-09-09T15:16:55 | 203,235,771 | 0 | 0 | null | 2019-08-19T19:21:54 | 2019-08-19T19:21:54 | null | UTF-8 | Python | false | false | 1,069 | py | #!/usr/bin/python3
""" Import flask class """
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def hello():
""" Function to handle request """
return 'Hello HBNB!'
@app.route('/hbnb')
def hello_hbnb():
""" Function to handle request to path /hbnb """
return 'HBNB'
@app.route('/c/<text>')
def c_route(text):
""" Function to handle request with a variable """
return 'C %s' % text.replace('_', ' ')
@app.route('/python/')
@app.route('/python/<text>')
def python(text='is cool'):
""" Function to handle request with a variable and data default """
return 'Python %s' % text.replace('_', ' ')
@app.route('/number/<int:num>')
def numbers(num):
""" Function to handle request with a variable with specified type """
return '%d is a number' % num
@app.route('/number_template/<int:num>')
def numbers_temp(num):
""" Function to handle request and render template"""
return render_template('5-number.html', number=num)
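# Illustrative requests for the routes above (derived from the decorators, not
# from any test in this file):
#   GET /c/is_fun            -> "C is fun"
#   GET /python/             -> "Python is cool"
#   GET /number/89           -> "89 is a number"
#   GET /number_template/89  -> renders 5-number.html with number=89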
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
| [
"[email protected]"
] | |
178f94e8e5286eb5012579f4c8562e0b113ac483 | 5a1cb546cb5132cc98e8a08acc6233540bc2dd34 | /forms.py | 2ea5dd93c573fdf1bc0e5fea25e98bbc4f5492d8 | [] | no_license | RitikaSingh02/ChitChat | 75cf19e00ce6c12a35cc081e55c4e0b378ee3347 | 9575daf0e61ba2f20797dfadf6ba246470dafbe0 | refs/heads/master | 2023-05-23T21:21:25.138134 | 2021-06-20T19:00:43 | 2021-06-20T19:00:43 | 378,376,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,741 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import InputRequired, Length, EqualTo, ValidationError
from models import User
def invalid_credentials(form, field):
""" Username and password checker """
password = field.data
username = form.username.data
print(username, password)
# Check username is invalid
user_data = User.query.filter_by(username=username).first()
if user_data is None:
raise ValidationError("Username or password is incorrect")
    # Check password is invalid (completion sketch: the original file stopped at
    # this comment; assumes the User model exposes a check_password() helper,
    # which this file does not confirm)
    elif not user_data.check_password(password):
        raise ValidationError("Username or password is incorrect")
class RegistrationForm(FlaskForm):
""" Registration form"""
username = StringField('username', validators=[InputRequired(message="Username required"), Length(
min=4, max=25, message="Username must be between 4 and 25 characters")])
password = PasswordField('password', validators=[InputRequired(message="Password required"), Length(
min=4, max=25, message="Password must be between 4 and 25 characters")])
confirm_pswd = PasswordField('confirm_pswd', validators=[InputRequired(
message="Password required"), EqualTo('password', message="Passwords must match")])
def validate_username(self, username):
user_object = User.query.filter_by(username=username.data).first()
if user_object:
raise ValidationError(
"Username already exists. Select a different username.")
class LoginForm(FlaskForm):
""" Login form """
username = StringField('username', validators=[
InputRequired(message="Username required")])
password = PasswordField('password', validators=[InputRequired(
message="Password required"), invalid_credentials])
| [
"[email protected]"
] | |
736e3c73364bb68691205bce0d3d43310bb53493 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/azuredata/v20200908preview/sql_managed_instance.py | 8824bb87b76c7f442079127163d4a2a50cf216fa | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 8,761 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = ['SqlManagedInstance']
class SqlManagedInstance(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
admin: Optional[pulumi.Input[str]] = None,
data_controller_id: Optional[pulumi.Input[str]] = None,
end_time: Optional[pulumi.Input[str]] = None,
instance_endpoint: Optional[pulumi.Input[str]] = None,
k8s_raw: Optional[Any] = None,
last_uploaded_date: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sql_managed_instance_name: Optional[pulumi.Input[str]] = None,
start_time: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
v_core: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A SqlManagedInstance.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] admin: The instance admin user
:param pulumi.Input[str] data_controller_id: null
:param pulumi.Input[str] end_time: The instance end time
:param pulumi.Input[str] instance_endpoint: The on premise instance endpoint
:param Any k8s_raw: The raw kubernetes information
:param pulumi.Input[str] last_uploaded_date: Last uploaded date from on premise cluster. Defaults to current date time
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] resource_group_name: The name of the Azure resource group
:param pulumi.Input[str] sql_managed_instance_name: The name of SQL Managed Instances
:param pulumi.Input[str] start_time: The instance start time
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] v_core: The instance vCore
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['admin'] = admin
__props__['data_controller_id'] = data_controller_id
__props__['end_time'] = end_time
__props__['instance_endpoint'] = instance_endpoint
__props__['k8s_raw'] = k8s_raw
__props__['last_uploaded_date'] = last_uploaded_date
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if sql_managed_instance_name is None:
raise TypeError("Missing required property 'sql_managed_instance_name'")
__props__['sql_managed_instance_name'] = sql_managed_instance_name
__props__['start_time'] = start_time
__props__['tags'] = tags
__props__['v_core'] = v_core
__props__['name'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:azuredata/v20190724preview:SqlManagedInstance")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SqlManagedInstance, __self__).__init__(
'azure-nextgen:azuredata/v20200908preview:SqlManagedInstance',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlManagedInstance':
"""
Get an existing SqlManagedInstance resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return SqlManagedInstance(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def admin(self) -> pulumi.Output[Optional[str]]:
"""
The instance admin user
"""
return pulumi.get(self, "admin")
@property
@pulumi.getter(name="dataControllerId")
def data_controller_id(self) -> pulumi.Output[Optional[str]]:
"""
null
"""
return pulumi.get(self, "data_controller_id")
@property
@pulumi.getter(name="endTime")
def end_time(self) -> pulumi.Output[Optional[str]]:
"""
The instance end time
"""
return pulumi.get(self, "end_time")
@property
@pulumi.getter(name="instanceEndpoint")
def instance_endpoint(self) -> pulumi.Output[Optional[str]]:
"""
The on premise instance endpoint
"""
return pulumi.get(self, "instance_endpoint")
@property
@pulumi.getter(name="k8sRaw")
def k8s_raw(self) -> pulumi.Output[Optional[Any]]:
"""
The raw kubernetes information
"""
return pulumi.get(self, "k8s_raw")
@property
@pulumi.getter(name="lastUploadedDate")
def last_uploaded_date(self) -> pulumi.Output[Optional[str]]:
"""
Last uploaded date from on premise cluster. Defaults to current date time
"""
return pulumi.get(self, "last_uploaded_date")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> pulumi.Output[Optional[str]]:
"""
The instance start time
"""
return pulumi.get(self, "start_time")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Read only system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="vCore")
def v_core(self) -> pulumi.Output[Optional[str]]:
"""
The instance vCore
"""
return pulumi.get(self, "v_core")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
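# Minimal usage sketch (values are placeholders, not from this file; only the
# three arguments checked as required in __init__ are supplied):
#
#   sqlmi = SqlManagedInstance("example",
#       location="westus2",
#       resource_group_name="example-rg",
#       sql_managed_instance_name="example-sqlmi")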
| [
"[email protected]"
] | |
7ddbd53246a76a75ab59a084952c7bcf676d9df6 | 769843f3cb57b9a0e1c68d02637b881bd43a103b | /nanodet/model/arch/gfl.py | b39eb616f9a7f0d17203d726452db6c449690856 | [] | no_license | CaptainEven/MyNanoDet | 9de3165ff14140eeabb362e793d8903f93cfdf77 | 9f5e5835bff8854d8d8c0041a7b3288ab017d7b6 | refs/heads/master | 2023-02-11T13:12:59.651520 | 2021-01-06T06:52:11 | 2021-01-06T06:52:11 | 319,273,525 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | from .one_stage import OneStage
class GFL(OneStage):
def __init__(self,
backbone_cfg,
fpn_cfg,
head_cfg, ):
"""
:param backbone_cfg:
:param fpn_cfg:
:param head_cfg:
"""
super(GFL, self).__init__(backbone_cfg,
fpn_cfg,
head_cfg)
def forward(self, x):
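        """One-stage pipeline: backbone features -> FPN fusion -> head predictions."""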
x = self.backbone(x)
x = self.fpn(x)
x = self.head(x)
return x
| [
"[email protected]"
] | |
db47d5fc38a06024d4c392f065371e46135a7707 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03608/s871426346.py | ab335095f513950ba52948b0a955d92571997d3a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | from scipy.sparse.csgraph import floyd_warshall
import numpy as np
from itertools import permutations, combinations
n,m,R = map(int,input().split())
r=list(map(int,input().split()))
for i in range(R):
r[i]-=1
d = np.zeros((n,n))
# read the M roads (endpoints a, b given 1-based; cost c)
for i in range(m):
a,b,c = map(int,input().split())
a,b = a-1, b-1
d[a,b] = c
dist =floyd_warshall(d,directed=0).astype(int)
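# with all-pairs shortest paths precomputed, brute-force every visiting order
# of the R required towns and keep the cheapest total (feasible since R is small)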
ans=10**10
for v in permutations(r):
tmp=0
for i in range(R-1):
tmp+=dist[v[i],v[i+1]]
ans=min(ans,tmp)
print(ans) | [
"[email protected]"
] | |
8442183c9909a042b13be80ea1fe9cd51b162c5e | fb64776f71eb2a469395a39c3ff33635eb388357 | /apps/accounts/tests/unit/services/test_session_service.py | 18ae5c6a9b811a936437f0a623fdd3f5404cbe91 | [
"MIT"
] | permissive | jimialex/django-wise | ec79d21c428fd1eea953362890051d2120e19f9e | 3fdc01eabdff459b31e016f9f6d1cafc19c5a292 | refs/heads/master | 2023-04-30T20:59:51.625190 | 2021-05-10T06:55:40 | 2021-05-10T06:55:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,101 | py |
# -*- coding: utf-8 -*-
import json
import pytest
import requests_mock
from doubles import allow
from google.oauth2 import id_token
from rest_framework import status
from apps.accounts.models import User
from apps.accounts.api.error_codes import AccountsErrorCodes
from apps.contrib.api.exceptions.base import APIBaseException
from apps.accounts.services.user_service import UserService
from apps.accounts.services.session_service import SessionService
@pytest.mark.django_db
class SessionServiceTests:
@staticmethod
def test_process_google_token(test_user):
allow(id_token).verify_oauth2_token.and_return({
'iss': SessionService.GOOGLE_ACCOUNTS_URL,
})
allow(UserService).create_or_update_for_social_networks.and_return(test_user)
user = SessionService.process_google_token('valid_token')
assert user is not None
assert isinstance(user, User)
@staticmethod
def test_process_google_token_invalid_issuer():
allow(id_token).verify_oauth2_token.and_return({
'iss': 'https://any.server',
})
with pytest.raises(APIBaseException) as exec_info:
SessionService.process_google_token('valid_token')
assert exec_info.value.detail.code == AccountsErrorCodes.INVALID_GOOGLE_TOKEN_ISSUER.code
@staticmethod
def test_process_google_token_invalid_token():
allow(id_token).verify_oauth2_token.and_raise(ValueError('Token Error'))
with pytest.raises(APIBaseException) as exec_info:
SessionService.process_google_token('valid_token')
assert exec_info.value.detail.code == AccountsErrorCodes.INVALID_GOOGLE_TOKEN_ID.code
@staticmethod
def test_process_facebook_valid_access_token(test_user):
allow(UserService).create_or_update_for_social_networks.and_return(test_user)
access_token = 'valid_access_token'
with requests_mock.mock() as mock:
mock.get(
SessionService.make_facebook_profile_url(access_token),
text=json.dumps({
'email': test_user.email,
'first_name': test_user.first_name,
'last_name': test_user.last_name,
}),
status_code=status.HTTP_200_OK,
)
user = SessionService.process_facebook_token(access_token)
assert user is not None
assert isinstance(user, User)
@staticmethod
def test_process_facebook_token_invalid_access_token():
access_token = 'invalid_access_token'
with requests_mock.mock() as mock:
mock.get(
SessionService.make_facebook_profile_url(access_token),
text=json.dumps({'error': 'facebook_raised_error'}),
status_code=status.HTTP_200_OK,
)
with pytest.raises(APIBaseException) as exec_info:
SessionService.process_facebook_token(access_token)
assert exec_info.value.detail.code == AccountsErrorCodes.INVALID_FACEBOOK_ACCESS_TOKEN.code
@staticmethod
def test_process_facebook_token_invalid_access_token_from_format(test_user):
access_token = 'invalid_access_token'
with requests_mock.mock() as mock:
mock.get(
SessionService.make_facebook_profile_url(access_token),
text='',
status_code=status.HTTP_200_OK,
)
with pytest.raises(APIBaseException) as exec_info:
SessionService.process_facebook_token(access_token)
assert exec_info.value.detail.code == AccountsErrorCodes.INVALID_FACEBOOK_ACCESS_TOKEN.code
@staticmethod
def test_make_user_session(test_user):
session = SessionService.make_user_session(test_user)
assert 'access_token' in session
assert 'refresh_token' in session
@staticmethod
def test_validate_session(test_user):
plain_password = 'new_password'
test_user.set_password(plain_password)
test_user.save()
assert SessionService.validate_session(test_user, plain_password)
@staticmethod
def test_validate_session_invalid_credentials(test_user):
with pytest.raises(APIBaseException) as exec_info:
SessionService.validate_session(None, 'new_password')
assert exec_info.value.detail.code == AccountsErrorCodes.INVALID_CREDENTIALS.code
with pytest.raises(APIBaseException) as exec_info:
SessionService.validate_session(test_user, 'new_password')
assert exec_info.value.detail.code == AccountsErrorCodes.INVALID_CREDENTIALS.code
@staticmethod
def test_validate_session_inactive_account(test_user):
plain_password = 'another_password'
test_user.set_password(plain_password)
test_user.is_active = False
test_user.save()
with pytest.raises(APIBaseException) as exec_info:
SessionService.validate_session(test_user, plain_password)
assert exec_info.value.detail.code == AccountsErrorCodes.INACTIVE_ACCOUNT.code
| [
"[email protected]"
] | |
4699ac187da996ef09de31aa7def4cdc34852f34 | d1ddb9e9e75d42986eba239550364cff3d8f5203 | /google-cloud-sdk/lib/surface/help.py | 0fc91d3eca0088a241bd20d9a83259c5cb961425 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/searchparty | 8ecd702af0d610a7ad3a8df9c4d448f76f46c450 | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | refs/heads/master | 2022-11-19T14:44:55.421926 | 2017-07-28T14:55:43 | 2017-07-28T14:55:43 | 282,495,798 | 0 | 0 | Apache-2.0 | 2020-07-25T17:48:53 | 2020-07-25T17:48:52 | null | UTF-8 | Python | false | false | 1,379 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A calliope command that prints help for another calliope command."""
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Help(base.Command):
"""Prints detailed help messages for the specified commands.
This command prints a detailed help message for the commands specified
after the ``help'' operand.
"""
@staticmethod
def Args(parser):
parser.add_argument(
'command',
nargs='*',
help="""\
A sequence of group and command names with no flags.
""")
def Run(self, args):
# --document=style=help to signal the metrics.Help() 'help' label in
# actions.RenderDocumentAction().Action().
self.ExecuteCommandDoNotUse(args.command + ['--document=style=help'])
return None
| [
"[email protected]"
] | |
56d9545172d93dc81f00b75c4467ced42d10d46d | 5e0de59693445ef463e8c6a8c05876aa9d975e9a | /student/views.py | cbd41d5ec89b0e3f8c2689c6ddd658b403c5ec6b | [] | no_license | ZhonggenLi/Student-Management-Web- | e511691f4d625e80b5f4460dce7c13788faffd14 | f5a5c30d171a182abe660bccd2c407d3f6ccf7b4 | refs/heads/master | 2022-12-02T08:05:43.493136 | 2020-08-09T02:00:19 | 2020-08-09T02:00:19 | 285,771,013 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,785 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.db import models
from student.models import Student
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import redirect
# Create your views here.
@csrf_exempt
def insert(request):
if request.POST:
post = request.POST
new_student = Student(
num = post["num"],
name = post["name"],
chinese = post["chinese"],
math = post["math"],
english = post["english"],
physics = post["physics"],
chemistry = post["chemistry"],
allscore = int(post["chinese"])+int(post["math"])+int(post["english"])+int(post["physics"])+int(post["chemistry"]))
new_student.save()
return render(request, 'insert.html')
def list(request):
student_list = Student.objects.all()
c = {"student_list": student_list, }
return render(request, "list.html", c)
def delete(request):
delete_num = request.GET.get('delete_num')
Student.objects.get(num = delete_num).delete()
return render(request, "delete.html")
def updateStudent(request):
update_num = request.GET.get('update_num')
update_student = Student.objects.get(num=update_num)
a = {"update_student": update_student, }
if request.POST:
update_name = request.POST.get("name")
update_chinese = request.POST.get("chinese")
update_math = request.POST.get("math")
update_english = request.POST.get("english")
update_physics = request.POST.get("physics")
update_chemistry = request.POST.get("chemistry")
update_student.num = update_num
update_student.name = update_name
update_student.chinese = update_chinese
update_student.math = update_math
update_student.english = update_english
update_student.physics = update_physics
update_student.chemistry = update_chemistry
update_student.allscore =int(update_chemistry)+int(update_physics)+int(update_english)+int(update_math)+int(update_chinese)
update_student.save()
return render(request, "update.html", a)
def questu(request):
stu = {}
if request.POST:
quename = request.POST.get("name")
quenum = request.POST.get("num")
if quename:
student_list = Student.objects.filter(name = quename)
stu = {"student_list": student_list, }
elif quenum:
student_list = Student.objects.filter(num = quenum)
stu = {"student_list":student_list, }
return render(request, "questu.html", stu)
def sinsort(request):
stu = {}
if request.POST:
proj = request.POST.get("proj")
if proj == '1':
stulist = Student.objects.order_by("-chinese")
stu = {"stulist": stulist, "proj": proj, }
elif proj == '2':
stulist = Student.objects.order_by("-math")
stu = {"stulist": stulist, "proj": proj, }
elif proj == '3':
stulist = Student.objects.order_by("-english")
stu = {"stulist": stulist, "proj": proj, }
elif proj == '4':
stulist = Student.objects.order_by("-physics")
stu = {"stulist": stulist, "proj": proj, }
elif proj == '5':
stulist = Student.objects.order_by("-chemistry")
stu = {"stulist": stulist, "proj": proj, }
elif proj == '6':
stulist = Student.objects.order_by("-allscore")
stu = {"stulist":stulist,"proj":proj, }
return render(request, "sinsort.html", stu)
def fraction(request):
stu = {}
if request.POST:
score = request.POST.get("score")
if score == '1':
stulist = Student.objects.filter(allscore__gte=600)
stulist = sorted(stulist, key=lambda x:x.allscore, reverse=True)
stu = {"stulist": stulist, }
elif score == '2':
stulist = Student.objects.filter(allscore__gte=500, allscore__lt=600)
stulist = sorted(stulist, key=lambda x: x.allscore, reverse=True)
stu = {"stulist": stulist, }
elif score == '3':
stulist = Student.objects.filter(allscore__gte=400, allscore__lt=500)
stulist = sorted(stulist, key=lambda x: x.allscore, reverse=True)
stu = {"stulist": stulist, }
elif score == '4':
stulist = Student.objects.filter(allscore__gte=300, allscore__lt=400)
stulist = sorted(stulist, key=lambda x: x.allscore, reverse=True)
stu = {"stulist": stulist, }
elif score == '5':
            stulist = Student.objects.filter(allscore__lt=300)
stulist = sorted(stulist, key=lambda x: x.allscore, reverse=True)
stu = {"stulist":stulist, }
return render(request, "fraction.html", stu)
| [
"[email protected]"
] | |
2c5e7974069afe82223007f602552e7aa63c0b86 | f810836bea801f2fa85418ac7f5f5ffb0f3e0bda | /abc/abc107/B - Grid Compression.py | 81b610852a92bc42137f84cad2c058bfd44c6c07 | [] | no_license | cocoinit23/atcoder | 0afac334233e5f8c75d447f6adf0ddf3942c3b2c | 39f6f6f4cc893e794d99c514f2e5adc9009ee8ca | refs/heads/master | 2022-08-29T06:01:22.443764 | 2022-07-29T07:20:05 | 2022-07-29T07:20:05 | 226,030,199 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | h, w = map(int, input().split())
grid = []
for i in range(h):
temp = list(input())
if set(temp) != {'.'}:
grid.append(temp)
transpose = [list(x) for x in zip(*grid)]
ans = []
for l in transpose:
if set(l) != {'.'}:
ans.append(l)
ans = [list(x) for x in zip(*ans)]
for l in ans:
print(''.join(l))
| [
"[email protected]"
] | |
23d42de74d03b489f2e51fe784d08a6877779f89 | 6b7c93ee4dc224e3041cd3df8e1d8ab128144cb8 | /dodo.py | 27adb2c078501374359755278adfcebcf6302b9e | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | philip-luk-tangent-anim/doit-py | df915b88d7d3e9964f2d1463d3255ae9c415f2da | 500731ce25e89f327d190b7b8b3fc02bbd71c0f1 | refs/heads/master | 2022-09-08T08:44:06.172002 | 2020-05-24T12:35:05 | 2020-05-24T12:35:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,271 | py | import glob
import subprocess
from doitpy.pyflakes import Pyflakes
from doitpy.coverage import PythonPackage, Coverage
from doitpy.package import Package
from doitpy import docs
DOIT_CONFIG = {'default_tasks': ['pyflakes', 'test']}
def task_pyflakes():
flakes = Pyflakes()
yield flakes.tasks('*.py')
yield flakes.tasks('doitpy/*.py')
yield flakes.tasks('tests/**/*.py',
exclude_paths=['tests/sample/flake_fail.py',])
def task_test():
"""run unit-tests"""
# XXX
return {'actions': ['py.test tests']}
def task_coverage():
cov = Coverage([PythonPackage('doitpy', test_path='tests')],
config={'branch':False})
yield cov.all()
yield cov.src()
yield cov.by_module()
def task_package():
"""upload package to pypi"""
pkg = Package()
yield pkg.manifest_git()
yield pkg.sdist()
yield pkg.sdist_upload()
def task_docs():
doc_files = glob.glob('doc/*.rst') + ['README.rst']
yield docs.spell(doc_files, 'doc/dictionary.txt')
yield docs.sphinx('doc/', 'doc/_build/html/', task_dep=['spell'])
yield docs.pythonhosted_upload('doc/_build/html/', task_dep=['sphinx'])
##########################
from doit.tools import result_dep
init_file = 'doitpy/__init__.py'
def task_version():
"""update version on <pkg-name>/__init__.py and doc/conf.py"""
# get package version from setup.py
# version must be set with a string literal using single/double quotes
# but not triple-quotes.
def version_str2tuple(string):
parts = []
for part in string.split('.'):
parts.append(part if not part.isdigit() else int(part))
return tuple(repr(x) for x in parts)
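    # e.g. version_str2tuple("0.29.dev0") -> ('0', '29', "'dev0'")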
def get_version():
#cmd = ("""awk 'match($0, /version[[:space:]]*=[[:space:]]*"""
# r"""['\''"](.*)['\''"].*/, ary) {print ary[1]}' setup.py""")
cmd = 'python setup.py --version'
version_str = subprocess.check_output(cmd, shell=True,
universal_newlines=True)
version_str = version_str.strip()
version_tuple = version_str2tuple(version_str)
return {
'version': '.'.join(version_tuple[:2]),
'release': version_str,
'tuple': version_tuple,
}
yield {
'name': 'get_from_setup',
'file_dep': ['setup.py'],
'actions': [get_version],
}
sed = "sed --in-place --regexp-extended "
yield {
'name': 'set_pkg',
'uptodate': [result_dep('version:get_from_setup')],
'getargs': {'version': ('version:get_from_setup', 'tuple')},
'actions': [
sed + r"'s/(__version__ = )(.*)/\1%(version)s/' " + init_file],
'targets': [init_file]
}
doc_file = 'doc/conf.py'
yield {
'name': 'set_doc',
'uptodate': [result_dep('version:get_from_setup')],
'getargs': {
'version': ('version:get_from_setup', 'version'),
'release': ('version:get_from_setup', 'release')},
'actions': [
sed + r""" "s/(version = )(.*)/\1'%(version)s'/" """ + doc_file,
sed + r""" "s/(release = )(.*)/\1'%(release)s'/" """ + doc_file,
]
}
| [
"[email protected]"
] | |
b98590e375b6fc48f66d64b21aa03c098ed29e85 | c9d3b03512dc3b2d268d0e99560889226322487c | /ggH01j/cut_hww_7TeV/mH125bkg/shape.py | 0fd39b064743c2b4a2496e9d1923046b12560d07 | [] | no_license | latinos/DatacardsConfigurations | 57f9d8707b3987de0491c66aa9533f9447cfb043 | 25827f8f8284d50d680ce1527e3b8c17c27d7c4a | refs/heads/master | 2021-01-22T03:05:27.692914 | 2015-04-13T08:22:02 | 2015-04-13T08:22:02 | 14,419,053 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | lumi=4.94
chans=['of_0j','of_1j', 'sf_0j', 'sf_1j']
# set of mc samples: 0j1j, vbf
mcset='0j1j-mH125'
dataset='Data2011'
variable='mll' # remember, y:x
selection='hww2011'
# TTree::Draw style as in h(nx,xmin,xmax, ny,ymin,ymax)
# shape range. can be an
# - hard-coded label
# - a tuple (nx,xmin,xmax)
# - 2d tuple (nx,xmin,xmax,ny,ymin,ymax)
# - 1d array ([x0,..,xn],)
# - 2d array ([x0,..,xn],[y0,...,ym])
#range=(4,80,280,16,0,200)
#range = ([80., 130., 180., 230., 280.],[0., 12.5, 25., 37.5, 50., 62.5, 75., 87.5, 100., 112.5, 125, 137.5, 150., 162.5, 175., 187.5, 200.])
# range = ([80., 130., 180., 280.],[0., 25., 37.5, 50., 62.5, 75., 87.5, 100., 112.5, 125, 140., 160., 190., 230., 310., 600.])
range = (1,0,200)
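# reading the spec above per the h(nx, xmin, xmax) convention: (1, 0, 200)
# declares a single bin spanning 0-200 on the x axis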
tag='mll_hww'
xlabel='m_{ll}'
# rebin=10
rebin=1
# directories
path_latino = '/shome/mtakahashi/HWW/Tree/ShapeAna/42x_494fb/tree_skim_wwmin/'
path_dd = '/shome/mtakahashi/HWW/Data/dd/hww_2011_494fb/'
#path_latino = '/afs/cern.ch/work/x/xjanssen/public/LatinoTrees/ShapeAnalysis/Tree/tree_skim_wwmin/'
#path_dd = '/afs/cern.ch/user/m/maiko/work/private/Data/dd/hww_2012_195fb/'
# other directories
path_shape_raw='raw'
path_shape_merged='merged'
| [
"[email protected]"
] | |
3a2c6507f4c805423f50e8689b45a9aa9f1b7965 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4342/codes/1764_1577.py | 7620b15a0235fac3283e2333810c92277d214f4a | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | from numpy import *
acel=array(eval(input("aceleracao:")))
vo=array(eval(input("velocidade inicial:")))
N=int(input("numero positivo?"))
t = 0
# The original file is cut off at "while()"; the loop below is a hedged
# completion that prints the position s = a*t**2/2 + v0*t for t = 0..N-1.
while t < N:
    s = ((acel * (t ** 2)) / 2) + (vo * t)
    print(s)
    t = t + 1
"[email protected]"
] | |
c87505ef499af065bd826fc255a3323af1107f30 | b503e79ccfca67c8114f5bd7a215f5ae993a0ba4 | /airflow/security/permissions.py | 983ebbd7f48ef4f41509dfb4a9f356808687fe2a | [
"Apache-2.0",
"BSD-3-Clause",
"Python-2.0",
"MIT"
] | permissive | github/incubator-airflow | df1d9780f862ea1df8261ea6015dd50a4583f983 | 73f70e00b9fd294057f8ca6b714a85622f6d5dd5 | refs/heads/gh-2.0.2 | 2023-07-29T18:08:43.140580 | 2022-09-14T18:23:42 | 2022-09-14T18:23:42 | 80,634,006 | 24 | 27 | Apache-2.0 | 2023-04-18T04:24:36 | 2017-02-01T15:34:55 | Python | UTF-8 | Python | false | false | 2,390 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Resource Constants
RESOURCE_ADMIN_MENU = "Admin"
RESOURCE_AIRFLOW = "Airflow"
RESOURCE_AUDIT_LOG = "Audit Logs"
RESOURCE_BROWSE_MENU = "Browse"
RESOURCE_DAG = "DAGs"
RESOURCE_DAG_PREFIX = "DAG:"
RESOURCE_DOCS_MENU = "Docs"
RESOURCE_DOCS_LINK = "Documentation"
RESOURCE_CONFIG = "Configurations"
RESOURCE_CONNECTION = "Connections"
RESOURCE_DAG_CODE = "DAG Code"
RESOURCE_DAG_RUN = "DAG Runs"
RESOURCE_IMPORT_ERROR = "ImportError"
RESOURCE_JOB = "Jobs"
RESOURCE_POOL = "Pools"
RESOURCE_PLUGIN = "Plugins"
RESOURCE_SLA_MISS = "SLA Misses"
RESOURCE_TASK_INSTANCE = "Task Instances"
RESOURCE_TASK_LOG = "Task Logs"
RESOURCE_TASK_RESCHEDULE = "Task Reschedules"
RESOURCE_VARIABLE = "Variables"
RESOURCE_WEBSITE = "Website"
RESOURCE_XCOM = "XComs"
RESOURCE_USERINFO_EDIT_VIEW = "UserInfoEditView"
RESOURCE_RESET_MY_PASSWORD_VIEW = "ResetMyPasswordView"
RESOURCE_USER_DB_MODELVIEW = "UserDBModelView"
RESOURCE_USER_OID_MODELVIEW = "UserOIDModelView"
RESOURCE_USER_LDAP_MODELVIEW = "UserLDAPModelView"
RESOURCE_USER_OAUTH_MODELVIEW = "UserOAuthModelView"
RESOURCE_USER_REMOTEUSER_MODELVIEW = "UserRemoteUserModelView"
# Action Constants
ACTION_CAN_CREATE = "can_create"
ACTION_CAN_READ = "can_read"
ACTION_CAN_EDIT = "can_edit"
ACTION_CAN_DELETE = "can_delete"
ACTION_CAN_ACCESS_MENU = "menu_access"
ACTION_CAN_THIS_FORM_GET = "can_this_form_get"
ACTION_CAN_THIS_FORM_POST = "can_this_form_post"
ACTION_RESETMYPASSWORD = "resetmypassword"
ACTION_CAN_USERINFO = "can_userinfo"
ACTION_USERINFOEDIT = "userinfoedit"
DEPRECATED_ACTION_CAN_DAG_READ = "can_dag_read"
DEPRECATED_ACTION_CAN_DAG_EDIT = "can_dag_edit"
| [
"[email protected]"
] | |
dbbff8315c06c6efb75d5de5c32e6314eb711158 | ecf2511642e0e8fb11616249b062a7c8a130d137 | /src/python_files/points_class.py | dadf59288293c9da1f916d04059e19ca530a1227 | [] | no_license | zaddan/apx_tool_chain | 61abc9d6958ce17d9823ac7bf71ae3097f76decf | ef81dd6515f110d3c8a4b62a642d44d9a4327a79 | refs/heads/master | 2021-01-24T06:37:12.248020 | 2017-04-08T20:39:08 | 2017-04-08T20:39:08 | 40,195,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,980 | py | import copy
import sys
import math
import numpy
import settings
import input_list
from calc_psnr import *
from inputs import *
from extract_result_properties import *
#**--------------------**
# disclaimer: SNR needs to be modified when the noise is zero
#**--------------------**
class points:
def __init__(self):
self.image_accurate_psnr = {}
self.image_accurate_psnr['Buildings.0007'] = 41.14
self.image_accurate_psnr['MtValley.0000'] = 39.67
self.image_accurate_psnr['Water.0004'] = 43.35
self.image_accurate_psnr ['GrassPlantsSky.0003'] = 40.34
self.image_accurate_psnr ['Flowers.0006'] = 39.67
self.image_accurate_psnr ['Buildings.0010'] = 41.14
self.image_accurate_psnr ['Misc.0003'] = 43.35
self.lOfError = []
self.lOfOperand = []
self.lOfAccurateValues = []
self.lOfRawValues = []
self.lOfOutput = []
self.dealingWithPics = False
self.quality_is_set = False
self.quality_calculatable = True
self.input_number = -1
def set_input_number(self, inputNumber):
self.input_number = inputNumber
def set_dealing_with_pics(self, dealingWithPics):
self.dealingWithPics = dealingWithPics
def append_Output(self, output):
self.lOfOutput.append(output)
def get_lOf_output(self):
assert (len(self.lOfOutput) > 0)
return self.lOfOutput
def get_input_number(self):
assert ((self.input_number) >= 0)
return self.input_number
def append_error(self, error):
self.lOfError.append(error)
def set_energy(self, energy):
self.energy = energy
def set_raw_setUp(self, raw_setUp):
self.raw_setUp = raw_setUp
def set_setUp(self, setUp):
self.setUp = setUp
def set_setUp_number(self, setUpNumber):
self.setUpNumber = setUpNumber
def append_lOf_operand(self, operand):
self.lOfOperand.append(operand)
def append_accurate_values(self, value):
self.lOfAccurateValues.append(value)
def set_SNR(self, SNR):
assert(not(1 == 2))
self.SNR = SNR
def set_quality(self, quality_value):
self.quality = abs(quality_value)
self.quality_is_set = True
def set_PSNR(self, PSNR):
self.PSNR = PSNR
def set_input_obj(self, inputObj):
self.inputObj = inputObj
def append_raw_values(self, rawValue):
self.lOfRawValues.append(rawValue)
def get_accurate_values(self):
return self.accurateValues
def get_lOfError(self):
return self.lOfError
def get_energy(self):
return self.energy
def get_setUp(self):
return self.setUp
def get_raw_setUp(self):
return self.raw_setUp
def get_setUp_number(self):
return self.setUpNumber
def get_lOf_operand(self, operand):
return self.lOfOperand
def get_accurate_values(self):
return self.lOfAccurateValues
def get_raw_values(self):
assert(len(self.lOfRawValues)>0)
return self.lOfRawValues
def get_input_obj(self):
return inputObj
def get_dealing_with_pics(self):
return self.dealingWithPics
def calculate_PSNR(self, yourImageName="", originalImageName=""):
refImage = self.inputObj.refImage
noisyImage = self.inputObj.noisyImage
self.PSNR = calculate_psnr(refImage, noisyImage)
def calculate_quality(self,normalize, possibly_worse_case_result_quality, settings_obj, input_index):
#---- calculate mean of accurate values
if (settings_obj.error_mode == "image"):
mean_of_acc_values = calculate_mean_acc_for_image(self.inputObj.refImage, self.inputObj.noisyImage)
elif(settings_obj.error_mode == "nearest_neighbors_2d" and settings_obj.benchmark_name =="sift"):
mean_of_acc_values = numpy.mean(map( lambda x: numpy.mean(x, axis=0)[:-1], self.lOfAccurateValues), axis=0)
else:
mean_of_acc_values = numpy.mean(map( lambda x: numpy.mean(x, axis=0), self.lOfAccurateValues),axis=0)
#--- calculate mean of error values
if (settings_obj.error_mode == "image"):
mean_of_error_values = calculate_error_for_image(self.inputObj.refImage, self.inputObj.noisyImage)
else:
mean_of_error_values = numpy.mean(map( lambda x: numpy.mean(x, axis=0), self.lOfError), axis=0)
#--- specific case of errorTest
if (settings_obj.errorTest):
print "---------------"
print "Vector ass with mean of Acc Vals"
print numpy.mean(self.lOfAccurateValues[0], axis=0)
print "magnitued of mean of Acc Val"
print mean_of_acc_values
print "Vector ass with mean of Erro Vals"
print numpy.mean(self.lOfError[0], axis=0)
print "magnitued of mean of Error"
print mean_of_error_values
print "---------------"
if (settings_obj.outputMode == "uniform"): #convert to a list for compatibility issues
mean_of_error_values = [abs(float(mean_of_error_values))]
mean_of_acc_values = [ abs(float(mean_of_acc_values))]
        # semi-sanity check (semi b/c it'll cause problems but it's not wrong per se)
some_element_zero = False
for el in mean_of_acc_values:
if el==0:
print "****ERROR ***** acc_val can not be zero"#there is gonna be problems later on
#if mean is zero, but technically there
#there is nothing wrong with that
                some_element_zero = True
sys.exit()
# assert (not(mean_of_acc_values == 0)) #there is gonna be problems later on
#if mean is zero, but technically there
#there is nothing wrong with that
# NSR= (mean_of_error_values/mean_of_acc_values)
#divide the corresponding values for avg of errors and acc values
if (settings_obj.DEBUG):
print "mean of acc-val: " + str(mean_of_acc_values);
print "mean of error-val: " + str(mean_of_error_values);
print "mean of acc-val: " + str(mean_of_acc_values);
print "mean of error-val: " + str(mean_of_error_values);
NSR_will_be_zero = True
for el in mean_of_error_values:
if el != 0:
NSR_will_be_zero = False
break
if (NSR_will_be_zero):
for index,el in enumerate(mean_of_error_values):
mean_of_error_values[index] = .00000000001
#--- here now
#print "mean of err vals" + str(mean_of_error_values)
#print "now printing error"
#print self.lOfError
# if (mean_of_error_values[0]) == 0:
# mean_of_error_values[0] = .00000000000001
if not(some_element_zero):
NSR_vector = np.divide(mean_of_error_values,mean_of_acc_values) #should be a vector
NSR_vector_abs = map(lambda x: abs(x), NSR_vector) #should be a vector
NSR = np.mean(NSR_vector_abs) #this should be a scalar number
if(settings_obj.quality_mode == "psnr"):
if not(mean_of_error_values[0] == 0):
PSNR = 10*math.log10(((255*255)/ mean_of_error_values[0]))
else:
PSNR = 10000
#if (normalize and not(possibly_worse_case_result_quality == float("inf"))):
# NSR = NSR#/possibly_worse_case_result_quality
#NSR = NSR/possibly_worse_case_result_quality
if (settings_obj.quality_mode == "snr"):
if(mean_of_error_values[0]== .00000000001):
self.quality = 10000
#print "the quality set is " + str(1/NSR)
self.quality_is_set = True
print "quality is" + str(self.quality)
elif (some_element_zero):
self.quality = 0
#print "the quality set is " + str(1/NSR)
self.quality_is_set = True
elif(NSR == 0):
print "*******ERROR(kind of) noise is zero, make sure SNR is the right quality mode****"
                assert False, "this scenario should never be allowed for NSR"
self.quality_calculatable = False
#self.SNR = mean_of_acc_values
self.quality = abs(1/NSR)
self.quality_is_set = True
else:
# print "goftam " + str(self.SNR)
#self.SNR = 1/NSR
self.quality = (abs(1/NSR))/input_list.lOf_accurate_points_quality[input_index]
#print "the quality set is " + str(1/NSR)
self.quality_is_set = True
print "quality is" + str(self.quality)
elif (settings_obj.quality_mode == "nsr"):
self.quality = abs(NSR)/input_list.lOf_accurate_points_quality[input_index]
self.quality_is_set = True
elif (settings_obj.quality_mode == "psnr"):
if not(settings_obj.error_mode == "image"):
print "psnr is not defined for other applications but images. This is b/c\
at the moment I am simply using mean_of_error_values[0] and I am \
not sure if that would work if len(mean_of_error_values) > 0"
# if not(self.inputObj.refImage_name[:-4] in self.image_accurate_psnr.keys()):
# print "this image accurate PSNR for image: " + str(self.inputObj.refImage_name[:-4]) + " is not defined. add it to the list and set it's value to 1, so we can get it's accurate value"
# sys.exit()
self.quality = abs(PSNR/input_list.lOf_accurate_points_quality[input_index])
#self.image_accurate_psnr[self.inputObj.refImage_name[:-4]])
self.quality_is_set = True
else:
print "*****ERROR: this quality_mode: " + str(settings_obj.quality_mode) + " is not defined***"
sys.exit();
def get_PSNR(self):
return self.PSNR
def get_SNR(self):
        assert False  # should never get here
return self.SNR
def get_quality(self):
assert(self.quality_is_set)
assert(self.quality >=0)
return self.quality
def set_varios_values(self, energy, quality, setup,raw_setup, input_number, setupNumber):
self.set_energy(energy)
self.set_quality(quality)
self.set_setUp(setup)
self.set_raw_setUp(raw_setup)
self.set_input_number(input_number)
self.set_setUp_number(setupNumber)
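# Worked example of the PSNR formula used in calculate_quality above
# (illustrative numbers, not from any run): for 8-bit images,
# PSNR = 10 * log10(255**2 / MSE), so MSE = 100 gives
# 10 * math.log10(65025 / 100.) ~= 28.13 dB.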
| [
"[email protected]"
] | |
c2ae43c2b91dac46aa6f964347b09d8dd965258b | 147d0863f4590649a90ea5f78c66974723a87247 | /api/views.py | dd4ab9ddecab2559c7faf133dec1c6c30553d792 | [] | no_license | jinchuika/ligabot | af5bd5443dc0df7d929e7b866869ba075c91db55 | 69544912e1ac46f281ba2fc78ff913d60d9a2a38 | refs/heads/master | 2021-01-20T12:50:32.894359 | 2017-05-08T14:07:47 | 2017-05-08T14:07:47 | 90,419,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,365 | py | from django.db.models import Q
from .serializers import MatchSerializer, TableSerializer, CompetitionSerializer, FixtureSerializer, TeamSerializer
from rest_framework import mixins, generics, viewsets
from api.api_request import RequestHandler
from api.models import Competition, Fixture, Team
class TeamMatchListView(generics.ListAPIView):
serializer_class = MatchSerializer
def get_queryset(self):
req = RequestHandler()
return req.get_team_scores(self.kwargs['team_id'])
class LeagueMatchListView(generics.ListAPIView):
serializer_class = MatchSerializer
def get_queryset(self):
req = RequestHandler()
return req.get_league_scores(self.kwargs['league_id'])
class LeagueStandingListView(generics.ListAPIView):
serializer_class = TableSerializer
def get_queryset(self):
req = RequestHandler()
return req.get_standings(self.kwargs['league_id'])
class CompetitionApiView(mixins.ListModelMixin, generics.GenericAPIView):
serializer_class = CompetitionSerializer
queryset = Competition.objects.all()
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
class FixtureViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = FixtureSerializer
queryset = Fixture.objects.all()
def get_queryset(self):
queryset = Fixture.objects.all()
competition_id = self.request.query_params.get('competition', None)
team_id = self.request.query_params.get('team', None)
home_id = self.request.query_params.get('home', None)
away_id = self.request.query_params.get('away', None)
if competition_id is not None:
queryset = queryset.filter(competition__id=competition_id)
if team_id is not None:
queryset = queryset.filter(Q(home_team__id=team_id) | Q(away_team__id=team_id))
if home_id is not None:
queryset = queryset.filter(home_team__id=home_id)
if away_id is not None:
queryset = queryset.filter(away_team__id=away_id)
return queryset
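# Illustrative requests the get_queryset filtering above supports (URL prefix
# assumed; router registration lives outside this file):
#   GET /fixtures/?competition=1
#   GET /fixtures/?team=3          (matches home OR away)
#   GET /fixtures/?home=3&away=5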
class CompetitionViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = CompetitionSerializer
queryset = Competition.objects.all()
class TeamViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = TeamSerializer
queryset = Team.objects.all()
| [
"[email protected]"
] | |
ce2c59b57ea5030b3747f41fc82eb48cbecb159b | d44cbbed1061299c733239c513bfa7f530d97be2 | /adminalerts/tests/test_permissions.py | 13f3d1ee56e0790799de2df81bc0c2806d155c80 | [
"MIT"
] | permissive | raonyguimaraes/sodar_core | f6cb331b31476be595ff0d5a279b82f6871530ff | 903eda944ed75aaf54b74d959ef634790c042e57 | refs/heads/master | 2022-07-24T20:00:33.442200 | 2020-04-24T14:03:47 | 2020-04-24T14:03:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,699 | py | """Tests for permissions in the adminalerts app"""
from django.urls import reverse
# Projectroles dependency
from projectroles.tests.test_permissions import TestPermissionBase
from adminalerts.tests.test_models import AdminAlertMixin
class TestAdminAlertPermissions(AdminAlertMixin, TestPermissionBase):
"""Tests for AdminAlert views"""
def setUp(self):
# Create users
self.superuser = self.make_user('superuser')
self.superuser.is_superuser = True
self.superuser.is_staff = True
self.superuser.save()
self.regular_user = self.make_user('regular_user')
# No user
self.anonymous = None
# Create alert
self.alert = self._make_alert(
message='alert',
user=self.superuser,
description='description',
active=True,
)
def test_alert_create(self):
"""Test permissions for AdminAlert creation"""
url = reverse('adminalerts:create')
good_users = [self.superuser]
bad_users = [self.anonymous, self.regular_user]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
def test_alert_update(self):
"""Test permissions for AdminAlert updating"""
url = reverse(
'adminalerts:update', kwargs={'adminalert': self.alert.sodar_uuid}
)
good_users = [self.superuser]
bad_users = [self.anonymous, self.regular_user]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
def test_alert_delete(self):
"""Test permissions for AdminAlert deletion"""
url = reverse(
'adminalerts:delete', kwargs={'adminalert': self.alert.sodar_uuid}
)
good_users = [self.superuser]
bad_users = [self.anonymous, self.regular_user]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
def test_alert_list(self):
"""Test permissions for AdminAlert list"""
url = reverse('adminalerts:list')
good_users = [self.superuser]
bad_users = [self.anonymous, self.regular_user]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
def test_alert_detail(self):
"""Test permissions for AdminAlert details"""
url = reverse(
'adminalerts:detail', kwargs={'adminalert': self.alert.sodar_uuid}
)
good_users = [self.superuser, self.regular_user]
bad_users = [self.anonymous]
self.assert_response(url, good_users, 200)
self.assert_response(url, bad_users, 302)
| [
"[email protected]"
] | |
a58fb6876185be87b47b79b2b86347d8d57364b3 | d15bdaddab59d1cfea76790004cbad3e5f0c2c55 | /batkin/build_isolated/base_local_planner/catkin_generated/generate_cached_setup.py | a99877387161efbb0a4785123c2005e570c2c1ff | [] | no_license | gychen-n/robot | 4265a1ff469d22550b6b537d1c81aa846ee7641a | 0663a33aea2c2de9e3ac5863307619091e5b5959 | refs/heads/main | 2023-04-10T13:32:06.623682 | 2021-04-16T00:41:04 | 2021-04-16T00:41:04 | 358,431,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,555 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/robot/batkin/devel_isolated/nav_core;/home/robot/batkin/devel_isolated/costmap_2d;/home/robot/batkin/devel_isolated/voxel_grid;/home/robot/batkin/devel_isolated/turtlebot_teleop;/home/robot/batkin/devel_isolated/turtlebot_rapps;/home/robot/batkin/devel_isolated/turtlebot_navigation;/home/robot/batkin/devel_isolated/turtlebot_follower;/home/robot/batkin/devel_isolated/turtlebot_description;/home/robot/batkin/devel_isolated/turtlebot_capabilities;/home/robot/batkin/devel_isolated/turtlebot_calibration;/home/robot/batkin/devel_isolated/turtlebot_bringup;/home/robot/batkin/devel_isolated/turtlebot_apps;/home/robot/batkin/devel_isolated/turtlebot_actions;/home/robot/batkin/devel_isolated/turtlebot;/home/robot/batkin/devel_isolated/tl740d;/home/robot/batkin/devel_isolated/stim;/home/robot/batkin/devel_isolated/stereo_image_proc;/home/robot/batkin/devel_isolated/spacenav_node;/home/robot/batkin/devel_isolated/slam_gmapping;/home/robot/batkin/devel_isolated/simulation_launch;/home/robot/batkin/devel_isolated/rviz_imu_plugin;/home/robot/batkin/devel_isolated/rslidar_sync;/home/robot/batkin/devel_isolated/rslidar_pointcloud;/home/robot/batkin/devel_isolated/rslidar_driver;/home/robot/batkin/devel_isolated/rslidar_msgs;/home/robot/batkin/devel_isolated/rslidar;/home/robot/batkin/devel_isolated/rbx1_apps;/home/robot/batkin/devel_isolated/ps3joy;/home/robot/batkin/devel_isolated/pointcloud_to_laserscan;/home/robot/batkin/devel_isolated/path_rviz_plugin;/home/robot/batkin/devel_isolated/path_server;/home/robot/batkin/devel_isolated/gmapping;/home/robot/batkin/devel_isolated/openslam_gmapping;/home/robot/batkin/devel_isolated/navigation;/home/robot/batkin/devel_isolated/map_server;/home/robot/batkin/devel_isolated/location_fusion;/home/robot/batkin/devel_isolated/joystick_drivers;/home/robot/batkin/devel_isolated/joy_to_twist;/home/robot/batkin/devel_isolated/joy;/home/robot/batkin/devel_isolated/image_view;/home/robot/batkin/devel_isolated/image_rotate;/home/robot/batkin/devel_isolated/image_publisher;/home/robot/batkin/devel_isolated/image_proc;/home/robot/batkin/devel_isolated/image_pipeline;/home/robot/batkin/devel_isolated/freenect_stack;/home/robot/batkin/devel_isolated/freenect_launch;/home/robot/batkin/devel_isolated/freenect_camera;/home/robot/batkin/devel_isolated/fake_localization;/home/robot/batkin/devel_isolated/depth_image_proc;/home/robot/batkin/devel_isolated/dashgo_driver;/home/robot/batkin/devel_isolated/cartographer_rviz;/home/robot/batkin/devel_isolated/cartographer_ros;/home/robot/batkin/devel_isolated/cartographer_ros_msgs;/home/robot/batkin/devel_isolated/camera_calibration;/home/robot/batkin/devel_isolated/autolabor_test_launch;/home/robot/batkin/devel_isolated/autolabor_simulation_object;/home/robot/batkin/devel_isolated/autolabor_simulation_stage;/home/robot/batkin/devel_isolated/autolabor_simulation_location;/home/robot/batkin/devel_isolated/autolabor_simulation_lidar;/home/robot/batkin/devel_isolated/autolabor_simulation_base;/home/robot/batkin/devel_isolated/autolabor_navigation_launch;/home/robot/batkin/devel_isolated/autolabor_keyboard_control;/home/robot/batkin/devel_isolated/autolabor_description;/home/robot/batkin/devel_isolated/ah100b;/home/robot/catkin_ws/devel;/opt/ros/kinetic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/robot/batkin/devel_isolated/base_local_planner/env.sh')
output_filename = '/home/robot/batkin/build_isolated/base_local_planner/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
beca6c6a6fdc8c07bf59fafda1a3c0a1f35f7377 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /ABC/abc101-abc150/abc125/d.py | 65d866fb02f012f8ac51ed868bc202f0c9c93fe5 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 426 | py | # -*- coding: utf-8 -*-
def main():
n = int(input())
a = list(map(int, input().split()))
minus_count = 0
abs_min = float('inf')
ans = 0
for ai in a:
if ai < 0:
minus_count += 1
abs_min = min(abs_min, abs(ai))
ans += abs(ai)
if minus_count % 2 == 1:
ans -= abs_min * 2
print(ans)
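    # why this works (assuming the usual operation behind this problem: flip
    # the signs of two adjacent elements): the multiset of |a_i| is invariant
    # and only the parity of negatives matters, so the best sum is
    # sum(|a_i|), minus 2 * min|a_i| when an odd count of negatives remains.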
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1c65e874cc500c5891439e350e89441caa59a097 | 14abe13bff2c346430ec7129c49a79ff4f52c5b0 | /canteen_tests/test__main__.py | 30b0a9135238a41d45d9bfcb92a8f477f5f1e761 | [
"MIT"
] | permissive | ianjw11/canteen | 9a3122deed73a545aa8cc0c51b6913e945e23e39 | cfc4ef00ec67df97e08b57222ca16aa9f2659a3e | refs/heads/master | 2021-01-21T02:41:11.139310 | 2014-07-09T15:59:05 | 2014-07-09T15:59:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # -*- coding: utf-8 -*-
'''
canteen: main tests
~~~~~~~~~~~~~~~~~~~
tests things at the top-level package main for canteen.
:author: Sam Gammon <[email protected]>
:copyright: (c) Sam Gammon, 2014
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
'''
| [
"[email protected]"
] | |
3d2bd8ff970e7a9fa6ed06d06362d446b3f710cd | 41dbb27af3a3ecabeb06e2fb45b3440bcc9d2b75 | /client/migrations/0001_initial.py | 8e69fdd04b310bf4d984b437435a5d02419dcaa0 | [] | no_license | joypaulgmail/Dookan | 4df83f37b7bcaff9052d5a09854d0bb344b9f05a | 7febf471dd71cc6ce7ffabce134e1e37a11309f7 | refs/heads/main | 2023-03-02T04:10:19.611371 | 2021-02-09T11:45:32 | 2021-02-09T11:45:32 | 336,476,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,240 | py | # Generated by Django 3.1 on 2020-11-28 06:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ClientDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=60)),
('primary_contact', models.IntegerField()),
('secondary_contact', models.IntegerField()),
('address', models.TextField()),
('pin', models.IntegerField()),
('image', models.ImageField(upload_to='client/')),
('idproof', models.ImageField(upload_to='client/id')),
('password', models.CharField(max_length=50)),
('confirm_password', models.CharField(max_length=50)),
],
options={
'db_table': 'clientdetail',
},
),
migrations.CreateModel(
name='ClientInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=60)),
('primary_contact', models.IntegerField()),
('secondary_contact', models.IntegerField()),
('address', models.TextField()),
('pin', models.IntegerField()),
('image', models.ImageField(upload_to='client/')),
('idproof', models.ImageField(upload_to='client/id')),
('password', models.CharField(max_length=50)),
('confirm_password', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='detail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('roll', models.IntegerField()),
],
options={
'db_table': 'detail',
},
),
]
| [
"[email protected]"
] | |
5138f623d82eed67afff76da96329637b5feec7f | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/pandas/tests/sparse/test_pivot.py | 44a8194bd5813d75ac10e22e2c0fafc7a5de7834 | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,369 | py | import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
class TestPivotTable:
def setup_method(self, method):
rs = np.random.RandomState(0)
self.dense = pd.DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
"C": rs.randn(8),
"D": rs.randn(8),
"E": [np.nan, np.nan, 1, 2, np.nan, 1, np.nan, np.nan],
}
)
self.sparse = self.dense.to_sparse()
def test_pivot_table(self):
res_sparse = pd.pivot_table(self.sparse, index="A", columns="B", values="C")
res_dense = pd.pivot_table(self.dense, index="A", columns="B", values="C")
tm.assert_frame_equal(res_sparse, res_dense)
res_sparse = pd.pivot_table(self.sparse, index="A", columns="B", values="E")
res_dense = pd.pivot_table(self.dense, index="A", columns="B", values="E")
tm.assert_frame_equal(res_sparse, res_dense)
res_sparse = pd.pivot_table(
self.sparse, index="A", columns="B", values="E", aggfunc="mean"
)
res_dense = pd.pivot_table(
self.dense, index="A", columns="B", values="E", aggfunc="mean"
)
tm.assert_frame_equal(res_sparse, res_dense)
def test_pivot_table_with_nans(self):
res_sparse = pd.pivot_table(
self.sparse, index="A", columns="B", values="E", aggfunc="sum"
)
res_dense = pd.pivot_table(
self.dense, index="A", columns="B", values="E", aggfunc="sum"
)
tm.assert_frame_equal(res_sparse, res_dense)
def test_pivot_table_multi(self):
res_sparse = pd.pivot_table(
self.sparse, index="A", columns="B", values=["D", "E"]
)
res_dense = pd.pivot_table(
self.dense, index="A", columns="B", values=["D", "E"]
)
res_dense = res_dense.apply(lambda x: x.astype("Sparse[float64]"))
tm.assert_frame_equal(res_sparse, res_dense)
| [
"[email protected]"
] | |
90f9d81e1ac37422b6bc4c3d5187d3f54eb2b54c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_083/ch129_2020_04_01_17_04_56_778073.py | 63194e8b169fab2b441de017db0cc2185bde9181 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | def verifica_quadrado_perfeito(n):
    # a perfect square is the sum of consecutive odd numbers: 1 + 3 + 5 + ...
    i = 1
    while n >= i:
        n = n - i
        if n == 0:
            return True
        i += 2
    return False
print(verifica_quadrado_perfeito(25)) | [
"[email protected]"
] | |
a0e585c73ccd0f53b6602f193b8f888584358b58 | 0fd2b832673946c9ee532686a2a35bf2680f8408 | /CybORG/CybORG/Shared/Actions/GlobalActions/ListGames.py | 76795b71f41c8585e6fe71ad744f82fc4ac7f0bd | [
"MIT"
] | permissive | pvu1984/cage-challenge-2 | 4e57bad7bc30c7df2b90c2fabc8395a5f2a3e65c | e76722dcd79a6b7511e185cde34fac1e0b45720e | refs/heads/main | 2023-09-02T15:11:32.072215 | 2021-11-12T02:33:19 | 2021-11-12T02:33:19 | 429,307,660 | 0 | 0 | MIT | 2021-11-18T05:27:36 | 2021-11-18T05:27:35 | null | UTF-8 | Python | false | false | 477 | py | # Copyright DST Group. Licensed under the MIT license.
from CybORG.Shared import Observation
from .GlobalAction import GlobalAction
class ListGames(GlobalAction):
"""Get a list of all active games """
def emu_execute(self, team_server) -> Observation:
self._log_info("Listing games")
obs = Observation()
game_ids = team_server.get_games_list()
obs.set_success(True)
obs.add_key_value("game_ids", game_ids)
return obs
| [
"[email protected]"
] | |
3a241204309bb872de8646ff5018a65a4c3b3f50 | a97fb0584709e292a475defc8506eeb85bb24339 | /source code/code/ch713.py | 0b37f65b4505c1c49f2186b0ad00f91dad64d26c | [] | no_license | AAQ6291/PYCATCH | bd297858051042613739819ed70c535901569079 | 27ec4094be785810074be8b16ef84c85048065b5 | refs/heads/master | 2020-03-26T13:54:57.051016 | 2018-08-17T09:05:19 | 2018-08-17T09:05:19 | 144,963,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | #!/usr/bin/env python
#coding=utf-8
"""
# The method-overloading idea: the block commented out here does NOT work in Python.
class Obj:
def __init__(self):
pass
def func(self, x=0):
print "func: x = %d ", x
def func(self, x=0, y=x+x):
print "func: x = %d, y = %d", x, y
def func(self, y=0):
print "func: y = %d ", y
"""
"""
We can change the approach and turn the code above into the code below:
put every parameter that should be passed in into the __init__() function,
and let that function receive the values the user passes in.
"""
class Obj:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
self.printState()
def printState(self):
print "func: x = %d, y = %d", self.x, self.y
# class overloading.
obj1 = Obj(10)
obj2 = Obj(10, 10+10)
obj3 = Obj(y=30)
| [
"[email protected]"
] | |
b59d95850094a8e35402245c52ec57944a360d0d | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-resource/azure/mgmt/resource/managedapplications/models/resource_py3.py | 5de1d2d3bb943026662197eb95d8d4819b5633fb | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,617 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Resource(Model):
"""Resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, *, location: str=None, tags=None, **kwargs) -> None:
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
| [
"[email protected]"
] | |
632531ce3e51e510b87b765c03cfa5f6ea264d16 | e1a78591c9702a7e61fbf076d78b59073ff33c37 | /InternetSemLimites/core/tests/test_fame_view.py | e938694682951acc076cf431f78fd1435b7febff | [
"MIT"
] | permissive | sebshub/PublicAPI | eb26f22a34a7022bccd6aaecdfa0c35e5fdf4c7a | 82677389430a30ad82be9fa81643431d9db24f0c | refs/heads/master | 2021-01-19T19:06:40.791795 | 2016-04-14T22:18:43 | 2016-04-14T22:18:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,489 | py | from django.shortcuts import resolve_url
from django.test import TestCase
from InternetSemLimites.core.models import Provider, State
class TestGet(TestCase):
def setUp(self):
sc = State.objects.get(abbr='SC')
go = State.objects.get(abbr='GO')
props = {'name': 'Xpto',
'url': 'http://xp.to',
'source': 'http://twitter.com/xpto',
'category': Provider.FAME,
'other': 'Lorem ipsum',
'published': True}
provider = Provider.objects.create(**props)
provider.coverage = [sc, go]
self.resp = self.client.get(resolve_url('fame'))
def test_get(self):
self.assertEqual(200, self.resp.status_code)
def test_type(self):
self.assertEqual('application/json', self.resp['Content-Type'])
def test_contents(self):
json_resp = self.resp.json()
fame = json_resp['providers']
with self.subTest():
self.assertEqual(1, len(fame))
self.assertNotIn('hall-of-shame', json_resp)
self.assertIn('headers', json_resp)
self.assertEqual('Xpto', fame[0]['name'])
self.assertEqual('http://xp.to', fame[0]['url'])
self.assertEqual('http://twitter.com/xpto', fame[0]['source'])
self.assertEqual(['GO', 'SC'], fame[0]['coverage'])
self.assertEqual('F', fame[0]['category'])
self.assertEqual('Lorem ipsum', fame[0]['other'])
| [
"[email protected]"
] | |
049c15190f0142931a95e324dcc39ec13e781818 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4_ch016_ep003_7_10/Gather1_W_change_C_fix_2blk/ep0_test/pyr_0s/L5/step10_a.py | 4ff6c76e23f16f7c54b25b5ca0b4b4c49fbd9a01 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,733 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                   ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")            ### split the path; used below to find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")      ### find which level kong_model2 is at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### locate the kong_model2 dir
import sys                                                   ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer  ### the -1 in the middle converts the length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] was to strip "step1x_"; later felt meaningful names were worth keeping, so changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] was to strip "mask_"; that leading "mask_" was only added because a python module name cannot start with a digit; later the automatic ordering seemed acceptable too, so changed to 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### 舉例: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_0side_L5 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
import Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.I_w_M_to_W_pyr.pyr_0s.L5.step10_a as I_w_M_to_W_p20_pyr
from Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.W_w_M_to_C_pyr.pyr_2s.L5.step10_a import ch032_1side_6__2side_6__ep010 as W_w_M_to_C_p20_2s_L5_Mae_Sob_k09
#############################################################################################################################################################################################################
'''
exp_dir 是 決定 result_dir 的 "上一層"資料夾 名字喔! exp_dir要巢狀也沒問題~
比如:exp_dir = "6_mask_unet/自己命的名字",那 result_dir 就都在:
6_mask_unet/自己命的名字/result_a
6_mask_unet/自己命的名字/result_b
6_mask_unet/自己命的名字/...
'''
use_db_obj = type8_blender_kong_doc3d_v2
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wz").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wy").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()] ### the z, y, x order is matched against step07_b_0b_Multi_UNet
#############################################################
### Build an empty Exp_builder so that resul_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_0side = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_0side_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_0side.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(I_to_Wx_Wy_Wz=I_w_M_to_W_p20_pyr.ch032_0side, W_to_Cx_Cy=W_w_M_to_C_p20_2s_L5_Mae_Sob_k09).set_result_name(result_name="p20_L5-ch032_0side")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### Press F5 directly, or run "python step10_b1_exp_obj_load_and_train_and_test.py" with nothing after it! Only then does it avoid falling into the code below, which is meant for step10_b_subprocess.py~~~
ch032_0side.build().run()
# print('no argument')
sys.exit()
    ### The part below is for step10_b_subprocess.py; it is equivalent to typing in cmd: python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()
eval(sys.argv[1])
| [
"[email protected]"
] | |
1acdfb1920626d1363c04b1ba023704c5c8ca65d | 0676f6e4d3510a0305d29aa0b1fe740d538d3b63 | /Python/FitPLine/InterpPline.py | 6390298333cf36a43eb18f5776c4babdb66361c4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | pgolay/PG_Scripts | f70ffe7e5ca07acd6f4caedc9a9aec566542da7c | 796704a7daa6ac222a40bb02afdb599f74a6b0d4 | refs/heads/master | 2021-01-19T16:53:41.525879 | 2017-02-07T18:26:10 | 2017-02-07T18:26:10 | 2,730,362 | 9 | 1 | null | 2016-12-30T17:58:08 | 2011-11-08T00:04:33 | Python | UTF-8 | Python | false | false | 482 | py | import Rhino
import scriptcontext as sc
import rhinoscriptsyntax as rs
def Test():
pLineId = rs.GetObject("Select a polyline", 4, preselect=True)
if pLineId is None: return
pLineObj= sc.doc.Objects.Find(pLineId)
rc, pLine = pLineObj.Geometry.TryGetPolyline()
ang = Rhino.RhinoMath.ToRadians(30)
x = pLine.BreakAtAngles(ang)
for item in x:
sc.doc.Objects.AddPolyline(item)
pass
if __name__ == "__main__":
Test() | [
"[email protected]"
] | |
d031f9d17e6d7442b98764362e2808e3192189ab | 25e627d2931b89eb53d7dad91b200c1c29fe0233 | /code/lab_05_Important_Trick/sys_demo(3).py | 0740b2d3d833b437ef0f896f320dcf4511a6b1a6 | [] | no_license | tcano2003/ucsc-python-for-programmers | efb973b39dbff9a7d30505fecbd9afb1e4d54ae9 | cc7fe1fb9f38f992464b5d86d92eb073e18e57d5 | refs/heads/master | 2020-03-26T22:13:29.256561 | 2018-08-26T14:54:08 | 2018-08-26T14:54:08 | 145,442,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #!/usr/bin/env python3
"""Demonstrating the sys module."""
import sys
def DemoOpenStreams():
"""Demos stderr, stdout and stdin. Also sys.exit()"""
sys.stderr.write('You can write to stderr.\n')
    print("You might like the >> syntax.", file=sys.stderr)  # Python 3: the old 'print >> stream' syntax becomes the file= keyword
sys.stdout.write('A fancier way to write to stdout.\n')
print ('Type something: ')
text = sys.stdin.readline()
print ('You said:', text)
def DemoCommandLine():
"""Shows the command line."""
print ('This program is named:', sys.argv[0]) #arg vector
print ('The command line arguments are:', sys.argv[1:])
def main():
DemoCommandLine()
DemoOpenStreams()
main()
| [
"[email protected]"
] | |
cc176a774c950fca1179196f76975994db0ffdf0 | 82042141439ae004fc38bb2ef6238f36ec6bb050 | /accounting/tables.py | e59289b4a7eb9e31422231abdbecd07c0d90f405 | [] | no_license | psteichen/clusil-intranet | 2e9a2cf3b00692a4ef441ebf669af4e63945e9a2 | 5c028d33f6a8559af57a4eeb02fc0f612cb1b261 | refs/heads/master | 2021-07-13T15:40:06.464105 | 2020-06-30T19:51:00 | 2020-06-30T19:51:00 | 27,195,950 | 2 | 1 | null | 2021-06-10T20:06:47 | 2014-11-26T20:59:46 | Python | UTF-8 | Python | false | false | 1,272 | py | #coding=utf-8
from django.conf import settings
from django.utils.safestring import mark_safe
from django.utils.html import escape
from django_tables2.tables import Table
from django_tables2 import Column
from cms.functions import visualiseDateTime
from cms.tables import gen_table_actions
from .models import Fee
#table for visualisation via django_tables2
class InvoiceTable(Table):
row_class = Column(visible=False, empty_values=()) #used to highlight some rows
actions = Column(verbose_name='Actions',empty_values=())
def render_row_class(self, value, record):
if record.paid:
return 'success'
def render_paid(self, value, record):
if value and record.paid_date: #paid
return mark_safe('<span class="glyphicon glyphicon-ok"></span> ('+visualiseDateTime(record.paid_date)+')')
else:
return mark_safe('<span class="glyphicon glyphicon-remove"></span>')
def render_actions(self, value, record):
actions = settings.TEMPLATE_CONTENT['accounting']['actions']['table']
return gen_table_actions(actions,{'id':record.member.id,'year':record.year})
class Meta:
model = Fee
fields = ( 'year', 'member.gen_name', 'invoice', 'paid', 'actions', )
attrs = {"class": "table table-stripped"}
| [
"[email protected]"
] | |
24c37b2731f76d1274c85d3269703a9dd52a74b7 | b2913030cf1646310b08efaa57c2199bb08e37c9 | /general/object_id/apero_astrometrics.py | 8f21305a71d1f0d505c9856a0c692a251e7481f5 | [
"MIT"
] | permissive | njcuk9999/apero-utils | 6f5b5083537562a31573b5c4cc76908c5fe194b9 | 368d53182428ca8befcdd3e5c8ca054f61913711 | refs/heads/master | 2023-08-31T02:56:01.369406 | 2023-08-18T15:12:59 | 2023-08-18T15:12:59 | 238,777,509 | 3 | 5 | MIT | 2023-08-17T14:15:41 | 2020-02-06T20:24:49 | Python | UTF-8 | Python | false | false | 5,169 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2021-11-10 at 16:04
@author: cook
"""
from astroquery.utils.tap.core import TapPlus
from astroquery.simbad import Simbad
import numpy as np
from typing import Dict, List, Tuple
import warnings
from apero import lang
from apero.base import base
from apero.core import constants
from apero.core.core import drs_log
from apero.core.core import drs_database
from apero.tools.module.database import manage_databases
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'apero_astrometrics.py'
__INSTRUMENT__ = 'None'
__PACKAGE__ = base.__PACKAGE__
__version__ = base.__version__
__author__ = base.__author__
__date__ = base.__date__
__release__ = base.__release__
# get text entry instance
textentry = lang.textentry
# Get Logging function
WLOG = drs_log.wlog
# get the databases
IndexDatabase = drs_database.IndexDatabase
ObjectDatabase = drs_database.ObjectDatabase
# simbad additional columns
SIMBAD_COLUMNS = ['ids']
# =============================================================================
# Define functions
# =============================================================================
def query_object(rawobjname):
# set up the TapPlus class
# simbad = TapPlus(url=URL)
# execute the job
# sjob = simbad.launch_job(QUERY.format(rawobjname))
# get the results
# table = sjob.get_results()
# get results
with warnings.catch_warnings(record=True) as _:
# add ids column
for simbad_column in SIMBAD_COLUMNS:
Simbad.add_votable_fields(simbad_column)
# query simbad
table = Simbad.query_object(rawobjname)
return 0
def query_database(params, rawobjnames: List[str]) -> List[str]:
    """
    Find all objects in the object database and return the list of unfound
    objects

    :param params: the parameter dictionary of constants
    :param rawobjnames: the raw object names to look for

    :return: the list of object names that were not found in the database
    """
# ---------------------------------------------------------------------
# get psuedo constants
pconst = constants.pload()
# ---------------------------------------------------------------------
# Update the object database (recommended only for full reprocessing)
# check that we have entries in the object database
manage_databases.object_db_populated(params)
# update the database if required
if params['UPDATE_OBJ_DATABASE']:
# log progress
WLOG(params, '', textentry('40-503-00039'))
# update database
manage_databases.update_object_database(params, log=False)
# ---------------------------------------------------------------------
# print progress
WLOG(params, '', 'Searching local object database for object names...')
# load the object database after updating
objdbm = ObjectDatabase(params)
objdbm.load_db()
# storage for output - assume none are found
unfound = []
# loop around objects and find them in database
for rawobjname in rawobjnames:
# clean the object
objname = pconst.DRS_OBJ_NAME(rawobjname)
# find correct name in the database (via objname or aliases)
correct_objname, found = objdbm.find_objname(pconst, objname)
# deal with found / not found
if found:
msg = '\tObject: "{0}" found in database as "{1}"'
margs = [rawobjname, correct_objname]
WLOG(params, '', msg.format(*margs))
else:
msg = '\tObject: "{0}" not found in database'
margs = [rawobjname]
WLOG(params, '', msg.format(*margs), colour='yellow')
# add to unfound list
unfound.append(rawobjname)
# return the entry names and the found dictionary
return unfound
# =============================================================================
# Start of code
# =============================================================================
# Main code here
if __name__ == "__main__":
# ----------------------------------------------------------------------
# get params
params = constants.load()
params.set('PID', 'None')
params.set('UPDATE_OBJ_DATABASE', False)
rawobjs = ['Gl699', 'Trappist1', 'Neil']
# ----------------------------------------------------------------------
# step 1: Is object in database?
# ----------------------------------------------------------------------
# query local object database
unfound_objs = query_database(params, rawobjs)
# stop here if all objects found
print('stop')
# ----------------------------------------------------------------------
# step 2: if not in database see if we can find it in simbad
# _ = query_object('Gl699')
# ----------------------------------------------------------------------
# step 3: add to pending database if submitted
# =============================================================================
# End of code
# =============================================================================
| [
"[email protected]"
] | |
957e9a83a0906447bcd7c0b17c13adc0c9d0a9e7 | f338eb32c45d8d5d002a84798a7df7bb0403b3c4 | /DQMOffline/Alignment/test/testMuAlDQM.py | dafae5c403d9e801584a14e97431e543ac40337d | [] | permissive | wouf/cmssw | 0a8a8016e6bebc611f1277379e12bef130464afb | 60da16aec83a0fc016cca9e2a5ed0768ba3b161c | refs/heads/CMSSW_7_3_X | 2022-06-30T04:35:45.380754 | 2015-05-08T17:40:17 | 2015-05-08T17:40:17 | 463,028,972 | 0 | 0 | Apache-2.0 | 2022-02-24T06:05:30 | 2022-02-24T06:05:26 | null | UTF-8 | Python | false | false | 4,965 | py | import os
import FWCore.ParameterSet.Config as cms
process = cms.Process("MuonAlignmentMonitor")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000000
process.load("DQMOffline.Alignment.muonAlignment_cfi")
process.load("DQMServices.Components.MEtoEDMConverter_cff")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
# ideal geometry and interface
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("Geometry.CommonDetUnit.bareGlobalTrackingGeometry_cfi")
# reconstruction sequence for Cosmics
process.load("Configuration.StandardSequences.ReconstructionCosmics_cff")
process.load("TrackPropagation.SteppingHelixPropagator.SteppingHelixPropagator_cfi")
#from DQMOffline.Alignment.input_cfi import source
#process.source = source
process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring('/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_ALL_V4_StreamALCARECOMuAlCalIsolatedMu_step2_AlcaReco-v1/0008/001A49E8-93C3-DD11-9720-003048D15CFA.root')
#fileNames = cms.untracked.vstring('/store/data/Commissioning08/Cosmics/ALCARECO/CRAFT_ALL_V9_StreamALCARECOMuAlStandAloneCosmics_225-v3/0008/4EE39297-01FF-DD11-82CF-003048678C9A.root')
fileNames= cms.untracked.vstring('/store/data/Commissioning08/Cosmics/RAW-RECO/CRAFT_ALL_V11_227_Tosca090216_ReReco_FromTrackerPointing_v2/0006/F453F276-8421-DE11-ABFE-001731AF66A7.root')
)
#process.muonAlignment.MuonCollection = "cosmicMuons"
process.muonAlignment.MuonCollection = "ALCARECOMuAlStandAloneCosmics"
#process.muonAlignment.MuonCollection = "ALCARECOMuAlCalIsolatedMu:StandAlone"
#process.muonAlignment.MuonCollection = "ALCARECOMuAlCalIsolatedMu:SelectedMuons"
process.muonAlignment.OutputMEsInRootFile = cms.bool(True)
process.muonAlignment.doSummary= cms.untracked.bool(True)
process.myDQM = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('drop *','keep *_MEtoEDMConverter_*_MuonAlignmentMonitor'),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('p')),
fileName = cms.untracked.string('dqm.root')
)
#process.p = cms.Path(process.muonAlignment*process.MEtoEDMConverter)
#process.load("DQMServices.Core.DQM_cfg")
#process.dqmSaverMy = cms.EDFilter("DQMFileSaver",
# convention=cms.untracked.string("Offline"),
# workflow=cms.untracked.string("/Alignment/Muon/ALCARECOMuAlGlobalCosmics_v11_SAalgo"),
# dirName=cms.untracked.string("."),
# saveAtJobEnd=cms.untracked.bool(True),
# forceRunNumber=cms.untracked.int32(1)
# )
process.myTracks = cms.EDFilter("MyTrackSelector",
# src = cms.InputTag("ALCARECOMuAlCalIsolatedMu:StandAlone"),
src = cms.InputTag("ALCARECOMuAlStandAloneCosmics"),
# src = cms.InputTag("cosmicMuons"),
cut = cms.string("pt > 40"),
filter = cms.bool(True)
)
process.p = cms.Path(
#process.myTracks *
process.muonAlignment
# *process.dqmSaverMy
# *process.MEtoEDMConverter
)
process.outpath = cms.EndPath(process.myDQM)
from CondCore.DBCommon.CondDBSetup_cfi import *
#process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_noesprefer_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
if os.environ["CMS_PATH"] != "":
del process.es_prefer_GlobalTag
del process.SiStripPedestalsFakeESSource
del process.siStripBadChannelFakeESSource
del process.siStripBadFiberFakeESSource
del process.DTFakeVDriftESProducer
#process.prefer("GlobalTag")
#process.prefer("GlobalTag")
process.GlobalTag.globaltag = 'CRAFT_ALL_V11::All'
process.myAlignment = cms.ESSource("PoolDBESSource",CondDBSetup,
#connect = cms.string('sqlite_file:/afs/cern.ch/user/p/pablom/public/DBs/Alignments_CRAFT_ALL_V4_refitter.db'),
#connect = cms.string('sqlite_file:/afs/cern.ch/cms/CAF/CMSALCA/ALCA_MUONALIGN/HWAlignment/AlignmentDB/AlignmentsNewEndcap.db'),
connect = cms.string('sqlite_file:/afs/cern.ch/user/p/pivarski/public/CRAFTalignment4_NewTracker_xyphiz2_alignedonlyAPEs.db'),
#DBParameters = CondCore.DBCommon.CondDBSetup_cfi.CondDBSetup.DBParameters,
toGet = cms.VPSet(
cms.PSet(
record = cms.string('DTAlignmentRcd'),
tag = cms.string('DTAlignmentRcd')
),
cms.PSet(
record = cms.string('DTAlignmentErrorRcd'),
tag = cms.string('DTAlignmentErrorRcd')
),
cms.PSet(
record = cms.string('CSCAlignmentRcd'),
tag = cms.string('CSCAlignmentRcd')
),
cms.PSet(
record = cms.string('CSCAlignmentErrorRcd'),
tag = cms.string('CSCAlignmentErrorRcd')
)
)
)
process.es_prefer_myAlignment = cms.ESPrefer("PoolDBESSource","myAlignment")
#process.schedule = cms.Schedule(process.p,process.outpath)
| [
"[email protected]"
] | |
be210f89568c29063428ef21602005c46b94af7c | e86d020f8ade86b86df6ad8590b4458a9d415491 | /test-xooj/practice_infiltration/resource.py | 54fa54208d6e269ffbafe87eff7295d066e7fde0 | [
"BSD-2-Clause"
] | permissive | g842995907/guops-know | e4c3b2d47e345db80c27d3ba821a13e6bf7191c3 | 0df4609f3986c8c9ec68188d6304d033e24b24c2 | refs/heads/master | 2022-12-05T11:39:48.172661 | 2019-09-05T12:35:32 | 2019-09-05T12:35:32 | 202,976,887 | 1 | 4 | null | 2022-11-22T02:57:53 | 2019-08-18T08:10:05 | JavaScript | UTF-8 | Python | false | false | 787 | py | # -*- coding: utf-8 -*-
from common_env.models import Env
from practice import resource
class PracticeInfiltrationTaskMeta(resource.SolvedBaseTask):
subsidiary = [{
'force': {
'create_user_id': None,
'last_edit_user_id': None,
},
'subsidiary': {
'category': {
'get': lambda self: self.category,
'set': lambda self, category: setattr(self, 'category', category),
},
'envs': {
'many_to_many': True,
'get': lambda self: self.envs.filter(env__status=Env.Status.TEMPLATE),
'set': lambda self, task_envs: self.envs.add(*task_envs),
},
},
'markdownfields': ['content', 'official_writeup']
}] | [
"[email protected]"
] | |
2b7af92bfdd54503386f5e94c97ca30500b26788 | 409c4d0dce72de987dff7c76857499fba8f8b7a0 | /disargv.py | 74cc21ae8cb5195e712dce1e98515d6ada50d2d1 | [] | no_license | crystaleone/test | b4fece7fbc4e8ddd6186ea13245c62970c6d7038 | 4af3964bf6a657e888c7850f07a031440ba29e7a | refs/heads/master | 2021-01-18T19:17:36.924170 | 2017-09-19T03:37:01 | 2017-09-19T03:37:01 | 86,895,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | import sys
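# prints the script name plus the first three command-line arguments;
# expects a call like: python disargv.py one two three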
l1 = sys.argv[0]
l2 = sys.argv[1]
l3 = sys.argv[2]
l4 = sys.argv[3]
print(l1,l2,l3,l4)
| [
"[email protected]"
] | |
48cf24d2660484608ab2a4153721faff3b921745 | 61673ab9a42f7151de7337608c442fa6247f13bb | /tkinter/minimize - iconify/main.py | 5f35a5d61c37b30d04da61f12bdcb2aa2ff74595 | [
"MIT"
] | permissive | furas/python-examples | 22d101670ecd667a29376d7c7d7d86f8ec71f6cf | 95cb53b664f312e0830f010c0c96be94d4a4db90 | refs/heads/master | 2022-08-23T23:55:08.313936 | 2022-08-01T14:48:33 | 2022-08-01T14:48:33 | 45,575,296 | 176 | 91 | MIT | 2021-02-17T23:33:37 | 2015-11-04T23:54:32 | Python | UTF-8 | Python | false | false | 648 | py | #!/usr/bin/env python3
# date: 2020.06.27
import tkinter as tk
root = tk.Tk()
# button [X] minimize (iconify) the main window
root.protocol("WM_DELETE_WINDOW", root.iconify)
# key ESC minimize (iconify) the main window
#root.bind('<Escape>', lambda event: root.destroy())
root.bind('<Escape>', lambda event: root.iconify())
# create a menu bar with an `Exit` and `Hide`
menubar = tk.Menu(root)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="Hide", command=root.iconify)
filemenu.add_command(label="Exit", command=root.destroy)
menubar.add_cascade(label="File", menu=filemenu)
root.config(menu=menubar)
root.mainloop()
| [
"[email protected]"
] | |
508ba345964380a61362afc629dea987c61c2b0c | c97ad9dbc96d151b5ef38a6d6dbac454d6e57576 | /architectures/preprocessing.py | f15f66e3b153c6b3f0bbf3ec310c58605c37fd61 | [
"BSD-3-Clause"
] | permissive | WangVictor/jets | 75177ff68fad2eaf3274537afcee325d93ff1d5f | 1f7209b4c026b63cad94aa8c93e2b8721b259b98 | refs/heads/master | 2021-07-14T05:39:54.983839 | 2017-10-17T20:48:01 | 2017-10-17T20:48:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,137 | py | import numpy as np
import copy
import pickle
import torch
from torch.autograd import Variable
# wrapping
def wrap(y, dtype='float'):
y_wrap = Variable(torch.from_numpy(y))
if dtype=='float':
y_wrap = y_wrap.float()
elif dtype == 'long':
y_wrap = y_wrap.long()
if torch.cuda.is_available():
y_wrap = y_wrap.cuda()
return y_wrap
def unwrap(y_wrap):
if y_wrap.is_cuda:
y = y_wrap.cpu().data.numpy()
else:
y = y_wrap.data.numpy()
return y
def wrap_X(X):
X_wrap = copy.deepcopy(X)
for jet in X_wrap:
jet["content"] = wrap(jet["content"])
return X_wrap
def unwrap_X(X_wrap):
X_new = []
for jet in X_wrap:
jet["content"] = unwrap(jet["content"])
X_new.append(jet)
return X_new
'''
def wrap(y, dtype='float'):
y_wrap = Variable(torch.from_numpy(y))
if dtype=='float':
y_wrap = y_wrap.float()
elif dtype == 'long':
y_wrap = y_wrap.long()
if torch.cuda.is_available():
y_wrap = y_wrap.cuda()
#y = y_wrap
return y_wrap
def unwrap(y):
if y.is_cuda:
y_unwrap = y.cpu().data.numpy()
else:
y_unwrap = y.data.numpy()
return y_unwrap
def wrap_X(X):
for jet in X:
jet['content'] = wrap(jet["content"])
#X = copy.copy(X_)
#for i, (jet, jet_) in enumerate(X, X_):
# jet['content'] = wrap(jet_["content"])
return X
def unwrap_X(X):
for jet in X:
jet["content"] = unwrap(jet["content"])
return X
'''
# Data loading related
def load_from_pickle(filename, n_jets):
jets = []
fd = open(filename, "rb")
for i in range(n_jets):
jet = pickle.load(fd)
jets.append(jet)
fd.close()
return jets
# Jet related
def _pt(v):
pz = v[2]
p = (v[0:3] ** 2).sum() ** 0.5
eta = 0.5 * (np.log(p + pz) - np.log(p - pz))
pt = p / np.cosh(eta)
return pt
def permute_by_pt(jet, root_id=None):
# ensure that the left sub-jet has always a larger pt than the right
if root_id is None:
root_id = jet["root_id"]
if jet["tree"][root_id][0] != -1:
left = jet["tree"][root_id][0]
right = jet["tree"][root_id][1]
pt_left = _pt(jet["content"][left])
pt_right = _pt(jet["content"][right])
if pt_left < pt_right:
jet["tree"][root_id][0] = right
jet["tree"][root_id][1] = left
permute_by_pt(jet, left)
permute_by_pt(jet, right)
return jet
def rewrite_content(jet):
jet = copy.deepcopy(jet)
if jet["content"].shape[1] == 5:
pflow = jet["content"][:, 4].copy()
content = jet["content"]
tree = jet["tree"]
def _rec(i):
if tree[i, 0] == -1:
pass
else:
_rec(tree[i, 0])
_rec(tree[i, 1])
c = content[tree[i, 0]] + content[tree[i, 1]]
content[i] = c
_rec(jet["root_id"])
if jet["content"].shape[1] == 5:
jet["content"][:, 4] = pflow
return jet
def extract(jet, pflow=False):
# per node feature extraction
jet = copy.deepcopy(jet)
s = jet["content"].shape
if not pflow:
content = np.zeros((s[0], 7))
else:
# pflow value will be one-hot encoded
content = np.zeros((s[0], 7+4))
for i in range(len(jet["content"])):
px = jet["content"][i, 0]
py = jet["content"][i, 1]
pz = jet["content"][i, 2]
p = (jet["content"][i, 0:3] ** 2).sum() ** 0.5
eta = 0.5 * (np.log(p + pz) - np.log(p - pz))
theta = 2 * np.arctan(np.exp(-eta))
pt = p / np.cosh(eta)
phi = np.arctan2(py, px)
content[i, 0] = p
content[i, 1] = eta if np.isfinite(eta) else 0.0
content[i, 2] = phi
content[i, 3] = jet["content"][i, 3]
content[i, 4] = (jet["content"][i, 3] /
jet["content"][jet["root_id"], 3])
content[i, 5] = pt if np.isfinite(pt) else 0.0
content[i, 6] = theta if np.isfinite(theta) else 0.0
if pflow:
if jet["content"][i, 4] >= 0:
content[i, 7+int(jet["content"][i, 4])] = 1.0
jet["content"] = content
return jet
def randomize(jet):
# build a random tree
jet = copy.deepcopy(jet)
leaves = np.where(jet["tree"][:, 0] == -1)[0]
nodes = [n for n in leaves]
content = [jet["content"][n] for n in nodes]
nodes = [i for i in range(len(nodes))]
tree = [[-1, -1] for n in nodes]
pool = [n for n in nodes]
next_id = len(nodes)
while len(pool) >= 2:
i = np.random.randint(len(pool))
left = pool[i]
del pool[i]
j = np.random.randint(len(pool))
right = pool[j]
del pool[j]
nodes.append(next_id)
c = (content[left] + content[right])
if len(c) == 5:
c[-1] = -1
content.append(c)
tree.append([left, right])
pool.append(next_id)
next_id += 1
jet["content"] = np.array(content)
jet["tree"] = np.array(tree).astype(int)
jet["root_id"] = len(jet["tree"]) - 1
return jet
def sequentialize_by_pt(jet, reverse=False):
# transform the tree into a sequence ordered by pt
jet = copy.deepcopy(jet)
leaves = np.where(jet["tree"][:, 0] == -1)[0]
nodes = [n for n in leaves]
content = [jet["content"][n] for n in nodes]
nodes = [i for i in range(len(nodes))]
tree = [[-1, -1] for n in nodes]
pool = sorted([n for n in nodes],
key=lambda n: _pt(content[n]),
reverse=reverse)
next_id = len(pool)
while len(pool) >= 2:
right = pool[-1]
left = pool[-2]
del pool[-1]
del pool[-1]
nodes.append(next_id)
c = (content[left] + content[right])
if len(c) == 5:
c[-1] = -1
content.append(c)
tree.append([left, right])
pool.append(next_id)
next_id += 1
jet["content"] = np.array(content)
jet["tree"] = np.array(tree).astype(int)
jet["root_id"] = len(jet["tree"]) - 1
return jet
| [
"[email protected]"
] | |
e42b386d67393c6af683599183132b972f57f57a | 928c53ea78be51eaf05e63f149fb291ec48be73e | /Min_Steps_to_Make_Piles_Equal_Height.py | 527f25bb619af81199021e028c3d6393516d14b4 | [] | no_license | saurabhchris1/Algorithm-Pratice-Questions-LeetCode | 35021d8fc082ecac65d7970d9f83f9be904fb333 | ea4a7d6a78d86c8619f91a75594de8eea264bcca | refs/heads/master | 2022-12-10T16:50:50.678365 | 2022-12-04T10:12:18 | 2022-12-04T10:12:18 | 219,918,074 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,697 | py | # Alexa is given n piles of equal or unequal heights. In one step, Alexa can remove any number
# of boxes from the pile which has the maximum height and try to make it equal to the one which
# is just lower than the maximum height of the stack. Determine the minimum number of steps
# required to make all of the piles equal in height.
#
# Example 1:
#
# Input: piles = [5, 2, 1]
# Output: 3
# Explanation:
# Step 1: reducing 5 -> 2 [2, 2, 1]
# Step 2: reducing 2 -> 1 [2, 1, 1]
# Step 3: reducing 2 -> 1 [1, 1, 1]
# So final number of steps required is 3.
#
# Let's take an example.
# Input : [1, 1, 2, 2, 2, 3, 3, 3, 4, 4]
# Output : 15
# After sorting in reverse, we have...
# [4, 4, 3, 3, 3, 2, 2, 2, 1, 1] --> (2 steps to transform the 4's) --> the 3's must wait for the 2
# numbers before them to finish their reduction
# [3, 3, 3, 3, 3, 2, 2, 2, 1, 1] --> (5 steps to transform the 3's) --> the 2's must wait for the 5
# numbers before them to finish their reduction
# [2, 2, 2, 2, 2, 2, 2, 2, 1, 1] --> (8 steps to transform the 2's) --> the 1's must wait for the 8
# numbers before them to finish their reduction
# [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
#
# Why did we sort in reverse? Because we want to process the maximum / largest number(s) first, which
# is what the question wants. At each step, we can only reduce the largest number(s) to the value of
# the 2nd-largest number(s)
#
# The main idea throughout the algorithm is that - Every time I meet a different number in the
# reverse-sorted array, I have to count how many numbers came before it. This represents the number
# of steps that was taken to reduce these numbers to the current number
# Reference https://leetcode.com/discuss/interview-question/364618/ by Wei_lun
def min_steps_balance(piles):
"""
Time : O(N log N)
    Space : O(N) for the sorted copy, where N = len(piles)
"""
# EDGE CASE
if len(piles) < 2:
return 0
# SORT THE BLOCKS
piles = sorted(piles, reverse=True)
# COUNT THE STEPS WE NEED
steps = 0
# EACH TIME WE SEE A DIFFERENT ELEMENT, WE NEED TO SEE HOW MANY ELEMENTS ARE BEFORE IT
for i in range(1, len(piles)):
steps += i if piles[i - 1] != piles[i] else 0
return steps
if __name__ == "__main__":
print(min_steps_balance([50]) == 0)
print(min_steps_balance([10, 10]) == 0)
print(min_steps_balance([5, 2, 1]) == 3)
print(min_steps_balance([4, 2, 1]) == 3)
print(min_steps_balance([4, 5, 5, 4, 2]) == 6)
print(min_steps_balance([4, 8, 16, 32]) == 6)
print(min_steps_balance([4, 8, 8]) == 2)
print(min_steps_balance([4, 4, 8, 8]) == 2)
print(min_steps_balance([1, 2, 2, 3, 3, 4]) == 9)
print(min_steps_balance([1, 1, 2, 2, 2, 3, 3, 3, 4, 4]) == 15) | [
"[email protected]"
] | |
4c2eb9b33282c9d82b39003190a73f99d017c113 | 9405aa570ede31a9b11ce07c0da69a2c73ab0570 | /aliyun-python-sdk-jaq/aliyunsdkjaq/request/v20160412/ScanMalwareRequest.py | 22d33711640845bbae4e9f3f05704d6c77572d61 | [
"Apache-2.0"
] | permissive | liumihust/aliyun-openapi-python-sdk | 7fa3f5b7ea5177a9dbffc99e73cf9f00e640b72b | c7b5dd4befae4b9c59181654289f9272531207ef | refs/heads/master | 2020-09-25T12:10:14.245354 | 2019-12-04T14:43:27 | 2019-12-04T14:43:27 | 226,002,339 | 1 | 0 | NOASSERTION | 2019-12-05T02:50:35 | 2019-12-05T02:50:34 | null | UTF-8 | Python | false | false | 1,280 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ScanMalwareRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'jaq', '2016-04-12', 'ScanMalware','jaq')
def get_ExtParam(self):
return self.get_query_params().get('ExtParam')
def set_ExtParam(self,ExtParam):
self.add_query_param('ExtParam',ExtParam)
def get_AppInfo(self):
return self.get_query_params().get('AppInfo')
def set_AppInfo(self,AppInfo):
self.add_query_param('AppInfo',AppInfo) | [
"[email protected]"
] | |
11b02e051010453eb013a89954d8791c7033b396 | 4daeb9ebf92d9826028a50bf4e4715c1ab145db1 | /Problem-Set/Numerical_Computing/INSOMA3/run.py | 119445d7f7fd419363c3de405740eab234b29f25 | [] | no_license | Rahul2025/Thesis | 4148653fcc96d623d602ba58e33cc6465d1cd9f5 | df31863194e2e0b69646e3a48fcaf90541a55c2a | refs/heads/master | 2020-05-02T00:48:09.593873 | 2013-04-21T20:23:02 | 2013-04-21T20:23:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | # filename : run.py
import time
f = open('/home/Rahul/Desktop/Thesis/Scripts/cyth_time', 'a')
start = time.time()
import qq18_tim
f.write(str(time.time() - start))
f.close()  # parentheses added so the file handle is actually closed | [
"[email protected]"
] | |
9f4416124cf22e1bd928fc11c7e256b682fd7520 | 7fffc39739869f259fe2d103efa05b87739778d1 | /Python/1149.py | 2396c4e58a35fca7ff1885f711b8fb4acd26d436 | [] | no_license | yunbinni/CodeUp | be39b3bd9fbeaa64be2a77a92918ebcc79b1799b | 5cb95442edb2b766de74154e0b91e8e1c236dd13 | refs/heads/main | 2023-08-15T19:02:29.819912 | 2021-10-12T00:50:16 | 2021-10-12T00:50:16 | 366,761,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | a, b = map(int, input().split())
print(a if a > b else b) | [
"[email protected]"
] | |
fce0c0cbd8334360a36182f7f12a1d6bf9d2bbbb | 66d79b863ef112d3e93770c695751d2f70c94aa5 | /pygeopressure_gui/views/well_log_view.py | 49097e2b311fa64d202ac8cf8d506a05529973d4 | [] | no_license | whimian/pyGeoPressure_gui | 95d11edbd0589d1ef4368298dbac2abdbd76d203 | a36777cf06a48eea681acc481880ee2e9309ac24 | refs/heads/master | 2020-03-11T22:26:56.967910 | 2018-05-09T08:20:48 | 2018-05-09T08:20:48 | 130,292,422 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | # -*- coding: utf-8 -*-
"""
a Well log display widget based on matplotlib
Created on Tue May 2nd 2018
"""
from __future__ import (division, absolute_import, print_function,
with_statement, unicode_literals)
from future.builtins import super
__author__ = "Yu Hao"
from pathlib2 import Path
from PyQt4.QtGui import QIcon, QDialog, QFileDialog, QWidget, QHBoxLayout, QGridLayout
from PyQt4 import uic, QtCore
from pygeopressure_gui.ui.ui_well_log_view_control import Ui_well_Log_View_Control
from pygeopressure_gui.widgets.matplotlib_widget import MatplotlibWidget
from pygeopressure_gui.config import CONF
class WellLogView(QWidget):
def __init__(self, parent=None):
super().__init__()
# self.setupUi(self)
self.initUI()
# connect
# self.surveyListWidget.itemSelectionChanged.connect(
# self.display_map_and_info)
# self.surveyButton.clicked.connect(self.on_clicked_surveyButton)
# self.selectButton.clicked.connect(self.on_clicked_selectButton)
# self.load_survey_list()
def initUI(self):
layout = QHBoxLayout(self)
self.control_widget = Well_Log_View_Control(self)
self.matplotlib_widget = MatplotlibWidget(self)
layout.addWidget(self.control_widget)
layout.addWidget(self.matplotlib_widget)
class Well_Log_View_Control(QWidget, Ui_well_Log_View_Control):
def __init__(self, parent=None):
super().__init__()
self.setupUi(self)
| [
"[email protected]"
] | |
2292b674d56303f1c405fe6e8fd757ff063bcd65 | 4e3c976773526fd610d64ffb83589bccfaee5e68 | /sponge-app/sponge-app-demo-service/sponge/sponge_demo_forms_library_args.py | bb2475d0c06d7040720b9ffa792fd6294790e641 | [
"Apache-2.0"
] | permissive | softelnet/sponge | 2313d2328953fcff49a002e727bb803757870627 | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | refs/heads/master | 2022-10-28T16:19:55.619882 | 2021-09-16T19:50:08 | 2021-09-16T19:50:08 | 95,256,030 | 10 | 2 | Apache-2.0 | 2022-10-04T23:55:09 | 2017-06-23T20:58:49 | Java | UTF-8 | Python | false | false | 4,364 | py | """
Sponge Knowledge Base
Demo Forms - Library as action arguments
"""
class ArgLibraryForm(Action):
def onConfigure(self):
self.withLabel("Library (books as arguments)")
self.withArgs([
StringType("search").withNullable().withLabel("Search").withFeature("responsive", True),
StringType("order").withLabel("Sort by").withProvided(ProvidedMeta().withValue().withValueSet()),
# Provided with overwrite to allow GUI refresh.
ListType("books").withLabel("Books").withProvided(ProvidedMeta().withValue().withOverwrite().withDependencies(
["search", "order"])).withFeatures({
"createAction":SubAction("ArgCreateBook"),
"readAction":SubAction("ArgReadBook").withArg("bookId", "@this"),
"updateAction":SubAction("ArgUpdateBook").withArg("bookId", "@this"),
"deleteAction":SubAction("ArgDeleteBook").withArg("bookId", "@this"),
"refreshable":True,
}).withElement(
IntegerType().withAnnotated()
)
]).withNonCallable().withFeature("icon", "library")
def onProvideArgs(self, context):
global LIBRARY
if "order" in context.provide:
context.provided["order"] = ProvidedValue().withValue("author").withAnnotatedValueSet([
AnnotatedValue("author").withValueLabel("Author"), AnnotatedValue("title").withValueLabel("Title")])
if "books" in context.provide:
context.provided["books"] = ProvidedValue().withValue(
map(lambda book: AnnotatedValue(int(book.id)).withValueLabel("{} - {}".format(book.author, book.title)).withValueDescription("Sample description (ID: " + str(book.id) +")"),
sorted(LIBRARY.findBooks(context.current["search"]), key = lambda book: book.author.lower() if context.current["order"] == "author" else book.title.lower())))
class ArgCreateBook(Action):
def onConfigure(self):
self.withLabel("Add a new book")
self.withArgs([
StringType("author").withLabel("Author"),
StringType("title").withLabel("Title")
]).withNoResult()
self.withFeatures({"visible":False, "callLabel":"Save", "cancelLabel":"Cancel"})
def onCall(self, author, title):
global LIBRARY
LIBRARY.addBook(author, title)
class ArgAbstractReadUpdateBook(Action):
def onConfigure(self):
self.withArgs([
IntegerType("bookId").withAnnotated().withFeature("visible", False),
StringType("author").withLabel("Author").withProvided(ProvidedMeta().withValue().withDependency("bookId")),
StringType("title").withLabel("Title").withProvided(ProvidedMeta().withValue().withDependency("bookId"))
]).withNoResult()
self.withFeatures({"visible":False})
def onProvideArgs(self, context):
global LIBRARY
if "author" in context.provide or "title" in context.provide:
book = LIBRARY.getBook(context.current["bookId"].value)
context.provided["author"] = ProvidedValue().withValue(book.author)
context.provided["title"] = ProvidedValue().withValue(book.title)
class ArgReadBook(ArgAbstractReadUpdateBook):
def onConfigure(self):
ArgAbstractReadUpdateBook.onConfigure(self)
self.withLabel("View the book").withNonCallable()
self.withFeatures({"cancelLabel":"Close"})
def onCall(self, bookId, author, title):
pass
class ArgUpdateBook(ArgAbstractReadUpdateBook):
def onConfigure(self):
ArgAbstractReadUpdateBook.onConfigure(self)
self.withLabel("Modify the book")
self.withFeatures({"callLabel":"Save", "cancelLabel":"Cancel"})
def onCall(self, bookId, author, title):
global LIBRARY
LIBRARY.updateBook(bookId.value, author, title)
class ArgDeleteBook(Action):
def onConfigure(self):
self.withLabel("Remove the book")
self.withArgs([
IntegerType("bookId").withAnnotated().withFeature("visible", False),
]).withNoResult()
self.withFeatures({"visible":False, "callLabel":"Save", "cancelLabel":"Cancel"})
def onCall(self, bookId):
global LIBRARY
self.logger.info("Deleting book id: {}", bookId.value)
LIBRARY.removeBook(bookId.value)
| [
"[email protected]"
] | |
7af57a4dd70f7a9cde6ca5ae074f3d75c585b917 | 176481563e0978340c737f61a4f85203ebb65c0b | /ptsites/sites/hdhome.py | f44a99bb7c69afa0dd0174e5d8ff0e93ad0ddf06 | [
"MIT"
] | permissive | ronioncloud/flexget_qbittorrent_mod | 328bd6448e6908bfb8d953e83e826ee8e966eee6 | a163a87328eeadf5ae961db4aec89f3f3ce48ad8 | refs/heads/master | 2023-01-20T03:43:07.664080 | 2020-11-21T10:28:13 | 2020-11-21T10:28:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | from ..schema.site_base import SiteBase
from ..schema.nexusphp import NexusPHP
# auto_sign_in
URL = 'https://hdhome.org/attendance.php'
SUCCEED_REGEX = '这是您的第 .* 次签到,已连续签到 .* 天,本次签到获得 .* 个魔力值。|您今天已经签到过了,请勿重复刷新。'
# html_rss
ROOT_ELEMENT_SELECTOR = '#torrenttable > tbody > tr:not(:first-child)'
FIELDS = {
'title': {
'element_selector': 'a[href*="details.php"]',
'attribute': 'title'
},
'url': {
'element_selector': 'a[href*="download.php"]',
'attribute': 'href'
},
'leecher': {
'element_selector': 'td:nth-child(7)',
'attribute': 'textContent'
},
'hr': {
'element_selector': 'img.hitandrun',
'attribute': 'alt'
}
}
class MainClass(NexusPHP):
@staticmethod
def build_sign_in(entry, config):
SiteBase.build_sign_in_entry(entry, config, URL, SUCCEED_REGEX)
@staticmethod
def build_html_rss_config(config):
config['root_element_selector'] = ROOT_ELEMENT_SELECTOR
config['fields'] = FIELDS
| [
"[email protected]"
] | |
eee84b835b4bb691bc922942e4d6e792138a170e | 18fa0ad57cd9c26bc2622ead61b88c81e017e2e8 | /kits/task_master.py | 3452c834c4cdd786cd94314026e08537c0677050 | [] | no_license | weihhh/ECG_pro | 45da18fad4709009cd4766a870fac7c5d5514a92 | 1e013cbb7352ad896661412f036fd9c6242a6001 | refs/heads/master | 2021-05-04T13:52:17.259815 | 2018-07-20T02:39:16 | 2018-07-20T02:39:16 | 120,323,445 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | # task_master.py
import random, time, queue
from multiprocessing.managers import BaseManager
# queue for dispatching tasks:
task_queue = queue.Queue()
# queue for receiving results:
result_queue = queue.Queue()
# QueueManager subclassing BaseManager:
class QueueManager(BaseManager):
pass
def return_task_queue():
global task_queue
return task_queue
def return_result_queue():
global result_queue
return result_queue
def task_run():
    # register both Queues on the network; the callable argument binds each Queue object:
QueueManager.register('get_task_queue', callable=return_task_queue)
QueueManager.register('get_result_queue', callable=return_result_queue)
    # bind port 5000 and set the authkey to 'abc':
manager = QueueManager(address=('127.0.0.1', 5000), authkey=b'abc')
    # start the Queue:
manager.start()
    # obtain the Queue objects that are accessible over the network:
task = manager.get_task_queue()
result = manager.get_result_queue()
    # put a few tasks in:
for i in range(10):
n = random.randint(0, 10000)
print('Put task %d...' % n)
task.put(n)
    # read the results back from the result queue:
print('Try get results...')
for i in range(10):
r = result.get(timeout=10)
print('Result: %s' % r)
    # shut down:
manager.shutdown()
print('master exit.')
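
# For reference, a minimal companion worker is sketched below (it would normally
# live in a separate task_worker.py); this is an illustrative sketch that assumes
# the master above is already running on 127.0.0.1:5000 with authkey b'abc'.
def worker_run():
    # register the same queue names; the client side registers no callable
    class WorkerQueueManager(BaseManager):
        pass
    WorkerQueueManager.register('get_task_queue')
    WorkerQueueManager.register('get_result_queue')
    # connect to the running master:
    m = WorkerQueueManager(address=('127.0.0.1', 5000), authkey=b'abc')
    m.connect()
    task = m.get_task_queue()
    result = m.get_result_queue()
    # fetch each task n and put back 'n * n = ...' as the result:
    for i in range(10):
        try:
            n = task.get(timeout=1)
            print('run task %d * %d...' % (n, n))
            result.put('%d * %d = %d' % (n, n, n * n))
        except queue.Empty:
            print('task queue is empty.')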
if __name__ == '__main__':
task_run() | [
"[email protected]"
] | |
7b02be135bb7cd1c50578c242919e30865e8ccb5 | 191a7f83d964f74a2b3c7faeb4fc47d9c63d521f | /.history/main_20210523152024.py | f07a1738d818cc8037a69010ee1541519e6e81c2 | [] | no_license | AndreLiu1225/Kinder-Values-Survey | 2a317feee8d5b17c27da2b2116742656e35d8ab9 | 090c27da0c822abb7dfc0ec6e13ae1b3dcb7bbf3 | refs/heads/master | 2023-05-03T00:26:00.481423 | 2021-06-04T03:24:19 | 2021-06-04T03:24:19 | 371,989,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | from flask import Flask, render_template, redirect, url_for
from flask_wtf import FlaskForm
from wtforms import StringField, TextField, SubmitField, IntegerField, SelectField, RadioField
from wtforms.validators import DataRequired, Email, EqualTo, Length, ValidationError
app = Flask(__name__)
app.config['SECRET_KEY'] = "0c8973c8a5e001bb0c816a7b56c84f3a"
class MCQ(FlaskForm):
age = IntegerField("Please enter your age", validators=[DataRequired()])
profession = StringField("What is your profession?", validators=[DataRequired(), Length(max=30)])
power = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
tradition = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
achievement = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
stimulation = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
hedonism = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
conformity = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
self_direction = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
submit = SubmitField("Submit")
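# A minimal route wiring this form up might look like the sketch below; the
# 'survey.html' template name is an assumption, not part of this snapshot:
#   @app.route('/', methods=['GET', 'POST'])
#   def survey():
#       form = MCQ()
#       if form.validate_on_submit():
#           return redirect(url_for('survey'))
#       return render_template('survey.html', form=form)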
if __name__ == "__main__":
app.run(debug=True)
| [
"[email protected]"
] | |
01ef591e939e1957ca428570b792b4f0d1913223 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/bgp/peeropenhist1d.py | a70f146b1976cbe24363ebda9c25b83c7b20cf50 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 33,768 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class PeerOpenHist1d(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.bgp.PeerOpenHist1d", "BGP Peer Open")
counter = CounterMeta("updateRcvd", CounterCategory.COUNTER, "packets", "Number of Update Messages Received")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "updateRcvdCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "updateRcvdPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "updateRcvdMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "updateRcvdMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "updateRcvdAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "updateRcvdSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "updateRcvdThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "updateRcvdTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "updateRcvdRate"
meta._counters.append(counter)
counter = CounterMeta("updateSent", CounterCategory.COUNTER, "packets", "Number of Update Messages Sent")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "updateSentCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "updateSentPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "updateSentMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "updateSentMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "updateSentAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "updateSentSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "updateSentThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "updateSentTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "updateSentRate"
meta._counters.append(counter)
counter = CounterMeta("openRcvd", CounterCategory.COUNTER, "packets", "Number of Open Messages Received")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "openRcvdCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "openRcvdPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "openRcvdMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "openRcvdMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "openRcvdAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "openRcvdSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "openRcvdThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "openRcvdTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "openRcvdRate"
meta._counters.append(counter)
counter = CounterMeta("openSent", CounterCategory.COUNTER, "packets", "Number of Open Messages Sent")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "openSentCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "openSentPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "openSentMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "openSentMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "openSentAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "openSentSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "openSentThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "openSentTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "openSentRate"
meta._counters.append(counter)
meta.moClassName = "bgpPeerOpenHist1d"
meta.rnFormat = "HDbgpPeerOpen1d-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical BGP Peer Open stats in 1 day"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.bgp.Peer")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.bgp.PeerOpenHist")
meta.rnPrefixes = [
('HDbgpPeerOpen1d-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 47730, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "openRcvdAvg", "openRcvdAvg", 48123, PropCategory.IMPLICIT_AVG)
prop.label = "Number of Open Messages Received average value"
prop.isOper = True
prop.isStats = True
meta.props.add("openRcvdAvg", prop)
prop = PropMeta("str", "openRcvdCum", "openRcvdCum", 48119, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Number of Open Messages Received cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("openRcvdCum", prop)
prop = PropMeta("str", "openRcvdMax", "openRcvdMax", 48122, PropCategory.IMPLICIT_MAX)
prop.label = "Number of Open Messages Received maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("openRcvdMax", prop)
prop = PropMeta("str", "openRcvdMin", "openRcvdMin", 48121, PropCategory.IMPLICIT_MIN)
prop.label = "Number of Open Messages Received minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("openRcvdMin", prop)
prop = PropMeta("str", "openRcvdPer", "openRcvdPer", 48120, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Number of Open Messages Received periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("openRcvdPer", prop)
prop = PropMeta("str", "openRcvdRate", "openRcvdRate", 48127, PropCategory.IMPLICIT_RATE)
prop.label = "Number of Open Messages Received rate"
prop.isOper = True
prop.isStats = True
meta.props.add("openRcvdRate", prop)
prop = PropMeta("str", "openRcvdSpct", "openRcvdSpct", 48124, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Number of Open Messages Received suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("openRcvdSpct", prop)
prop = PropMeta("str", "openRcvdThr", "openRcvdThr", 48125, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Number of Open Messages Received thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("openRcvdThr", prop)
prop = PropMeta("str", "openRcvdTr", "openRcvdTr", 48126, PropCategory.IMPLICIT_TREND)
prop.label = "Number of Open Messages Received trend"
prop.isOper = True
prop.isStats = True
meta.props.add("openRcvdTr", prop)
prop = PropMeta("str", "openSentAvg", "openSentAvg", 48144, PropCategory.IMPLICIT_AVG)
prop.label = "Number of Open Messages Sent average value"
prop.isOper = True
prop.isStats = True
meta.props.add("openSentAvg", prop)
prop = PropMeta("str", "openSentCum", "openSentCum", 48140, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Number of Open Messages Sent cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("openSentCum", prop)
prop = PropMeta("str", "openSentMax", "openSentMax", 48143, PropCategory.IMPLICIT_MAX)
prop.label = "Number of Open Messages Sent maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("openSentMax", prop)
prop = PropMeta("str", "openSentMin", "openSentMin", 48142, PropCategory.IMPLICIT_MIN)
prop.label = "Number of Open Messages Sent minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("openSentMin", prop)
prop = PropMeta("str", "openSentPer", "openSentPer", 48141, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Number of Open Messages Sent periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("openSentPer", prop)
prop = PropMeta("str", "openSentRate", "openSentRate", 48148, PropCategory.IMPLICIT_RATE)
prop.label = "Number of Open Messages Sent rate"
prop.isOper = True
prop.isStats = True
meta.props.add("openSentRate", prop)
prop = PropMeta("str", "openSentSpct", "openSentSpct", 48145, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Number of Open Messages Sent suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("openSentSpct", prop)
prop = PropMeta("str", "openSentThr", "openSentThr", 48146, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Number of Open Messages Sent thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("openSentThr", prop)
prop = PropMeta("str", "openSentTr", "openSentTr", 48147, PropCategory.IMPLICIT_TREND)
prop.label = "Number of Open Messages Sent trend"
prop.isOper = True
prop.isStats = True
meta.props.add("openSentTr", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "updateRcvdAvg", "updateRcvdAvg", 48165, PropCategory.IMPLICIT_AVG)
prop.label = "Number of Update Messages Received average value"
prop.isOper = True
prop.isStats = True
meta.props.add("updateRcvdAvg", prop)
prop = PropMeta("str", "updateRcvdCum", "updateRcvdCum", 48161, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Number of Update Messages Received cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("updateRcvdCum", prop)
prop = PropMeta("str", "updateRcvdMax", "updateRcvdMax", 48164, PropCategory.IMPLICIT_MAX)
prop.label = "Number of Update Messages Received maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("updateRcvdMax", prop)
prop = PropMeta("str", "updateRcvdMin", "updateRcvdMin", 48163, PropCategory.IMPLICIT_MIN)
prop.label = "Number of Update Messages Received minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("updateRcvdMin", prop)
prop = PropMeta("str", "updateRcvdPer", "updateRcvdPer", 48162, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Number of Update Messages Received periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("updateRcvdPer", prop)
prop = PropMeta("str", "updateRcvdRate", "updateRcvdRate", 48169, PropCategory.IMPLICIT_RATE)
prop.label = "Number of Update Messages Received rate"
prop.isOper = True
prop.isStats = True
meta.props.add("updateRcvdRate", prop)
prop = PropMeta("str", "updateRcvdSpct", "updateRcvdSpct", 48166, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Number of Update Messages Received suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("updateRcvdSpct", prop)
prop = PropMeta("str", "updateRcvdThr", "updateRcvdThr", 48167, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Number of Update Messages Received thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("updateRcvdThr", prop)
prop = PropMeta("str", "updateRcvdTr", "updateRcvdTr", 48168, PropCategory.IMPLICIT_TREND)
prop.label = "Number of Update Messages Received trend"
prop.isOper = True
prop.isStats = True
meta.props.add("updateRcvdTr", prop)
prop = PropMeta("str", "updateSentAvg", "updateSentAvg", 48186, PropCategory.IMPLICIT_AVG)
prop.label = "Number of Update Messages Sent average value"
prop.isOper = True
prop.isStats = True
meta.props.add("updateSentAvg", prop)
prop = PropMeta("str", "updateSentCum", "updateSentCum", 48182, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Number of Update Messages Sent cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("updateSentCum", prop)
prop = PropMeta("str", "updateSentMax", "updateSentMax", 48185, PropCategory.IMPLICIT_MAX)
prop.label = "Number of Update Messages Sent maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("updateSentMax", prop)
prop = PropMeta("str", "updateSentMin", "updateSentMin", 48184, PropCategory.IMPLICIT_MIN)
prop.label = "Number of Update Messages Sent minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("updateSentMin", prop)
prop = PropMeta("str", "updateSentPer", "updateSentPer", 48183, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Number of Update Messages Sent periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("updateSentPer", prop)
prop = PropMeta("str", "updateSentRate", "updateSentRate", 48190, PropCategory.IMPLICIT_RATE)
prop.label = "Number of Update Messages Sent rate"
prop.isOper = True
prop.isStats = True
meta.props.add("updateSentRate", prop)
prop = PropMeta("str", "updateSentSpct", "updateSentSpct", 48187, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Number of Update Messages Sent suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("updateSentSpct", prop)
prop = PropMeta("str", "updateSentThr", "updateSentThr", 48188, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Number of Update Messages Sent thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("updateSentThr", prop)
prop = PropMeta("str", "updateSentTr", "updateSentTr", 48189, PropCategory.IMPLICIT_TREND)
prop.label = "Number of Update Messages Sent trend"
prop.isOper = True
prop.isStats = True
meta.props.add("updateSentTr", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
a2bd0e9dc99c1faf53437fb4076d01fd4561fe8c | 62bbfb6c50bba16304202aea96d1de4990f95e04 | /dependencies/pulumi_aws/ssm/maintenance_window_target.py | 27e01ee014ac88dc17d6881ab9cdc5a010ea8e33 | [] | no_license | adriell/lambda-autoservico-storagegateway | b40b8717c8de076e61bbd422461c7d624a0d2273 | f6e3dea61b004b73943a5438c658d3f019f106f7 | refs/heads/main | 2023-03-16T14:41:16.821675 | 2021-03-11T03:30:33 | 2021-03-11T03:30:33 | 345,865,704 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,674 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['MaintenanceWindowTarget']
class MaintenanceWindowTarget(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
owner_information: Optional[pulumi.Input[str]] = None,
resource_type: Optional[pulumi.Input[str]] = None,
targets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MaintenanceWindowTargetTargetArgs']]]]] = None,
window_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides an SSM Maintenance Window Target resource
## Instance Target Example Usage
```python
import pulumi
import pulumi_aws as aws
window = aws.ssm.MaintenanceWindow("window",
schedule="cron(0 16 ? * TUE *)",
duration=3,
cutoff=1)
target1 = aws.ssm.MaintenanceWindowTarget("target1",
window_id=window.id,
description="This is a maintenance window target",
resource_type="INSTANCE",
targets=[aws.ssm.MaintenanceWindowTargetTargetArgs(
key="tag:Name",
values=["acceptance_test"],
)])
```
## Resource Group Target Example Usage
```python
import pulumi
import pulumi_aws as aws
window = aws.ssm.MaintenanceWindow("window",
schedule="cron(0 16 ? * TUE *)",
duration=3,
cutoff=1)
target1 = aws.ssm.MaintenanceWindowTarget("target1",
window_id=window.id,
description="This is a maintenance window target",
resource_type="RESOURCE_GROUP",
targets=[aws.ssm.MaintenanceWindowTargetTargetArgs(
key="resource-groups:ResourceTypeFilters",
values=["AWS::EC2::Instance"],
)])
```
## Import
SSM Maintenance Window targets can be imported using `WINDOW_ID/WINDOW_TARGET_ID`, e.g.
```sh
$ pulumi import aws:ssm/maintenanceWindowTarget:MaintenanceWindowTarget example mw-0c50858d01EXAMPLE/23639a0b-ddbc-4bca-9e72-78d96EXAMPLE
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The description of the maintenance window target.
:param pulumi.Input[str] name: The name of the maintenance window target.
:param pulumi.Input[str] owner_information: User-provided value that will be included in any CloudWatch events raised while running tasks for these targets in this Maintenance Window.
:param pulumi.Input[str] resource_type: The type of target being registered with the Maintenance Window. Possible values are `INSTANCE` and `RESOURCE_GROUP`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MaintenanceWindowTargetTargetArgs']]]] targets: The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs. You can specify targets using instance IDs, resource group names, or tags that have been applied to instances. For more information about these examples formats see
(https://docs.aws.amazon.com/systems-manager/latest/userguide/mw-cli-tutorial-targets-examples.html)
:param pulumi.Input[str] window_id: The Id of the maintenance window to register the target with.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['name'] = name
__props__['owner_information'] = owner_information
if resource_type is None and not opts.urn:
raise TypeError("Missing required property 'resource_type'")
__props__['resource_type'] = resource_type
if targets is None and not opts.urn:
raise TypeError("Missing required property 'targets'")
__props__['targets'] = targets
if window_id is None and not opts.urn:
raise TypeError("Missing required property 'window_id'")
__props__['window_id'] = window_id
super(MaintenanceWindowTarget, __self__).__init__(
'aws:ssm/maintenanceWindowTarget:MaintenanceWindowTarget',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
owner_information: Optional[pulumi.Input[str]] = None,
resource_type: Optional[pulumi.Input[str]] = None,
targets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MaintenanceWindowTargetTargetArgs']]]]] = None,
window_id: Optional[pulumi.Input[str]] = None) -> 'MaintenanceWindowTarget':
"""
Get an existing MaintenanceWindowTarget resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The description of the maintenance window target.
:param pulumi.Input[str] name: The name of the maintenance window target.
:param pulumi.Input[str] owner_information: User-provided value that will be included in any CloudWatch events raised while running tasks for these targets in this Maintenance Window.
:param pulumi.Input[str] resource_type: The type of target being registered with the Maintenance Window. Possible values are `INSTANCE` and `RESOURCE_GROUP`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MaintenanceWindowTargetTargetArgs']]]] targets: The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs. You can specify targets using instance IDs, resource group names, or tags that have been applied to instances. For more information about these examples formats see
(https://docs.aws.amazon.com/systems-manager/latest/userguide/mw-cli-tutorial-targets-examples.html)
:param pulumi.Input[str] window_id: The Id of the maintenance window to register the target with.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["description"] = description
__props__["name"] = name
__props__["owner_information"] = owner_information
__props__["resource_type"] = resource_type
__props__["targets"] = targets
__props__["window_id"] = window_id
return MaintenanceWindowTarget(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of the maintenance window target.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the maintenance window target.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="ownerInformation")
def owner_information(self) -> pulumi.Output[Optional[str]]:
"""
User-provided value that will be included in any CloudWatch events raised while running tasks for these targets in this Maintenance Window.
"""
return pulumi.get(self, "owner_information")
@property
@pulumi.getter(name="resourceType")
def resource_type(self) -> pulumi.Output[str]:
"""
The type of target being registered with the Maintenance Window. Possible values are `INSTANCE` and `RESOURCE_GROUP`.
"""
return pulumi.get(self, "resource_type")
@property
@pulumi.getter
def targets(self) -> pulumi.Output[Sequence['outputs.MaintenanceWindowTargetTarget']]:
"""
The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs. You can specify targets using instance IDs, resource group names, or tags that have been applied to instances. For more information about these examples formats see
(https://docs.aws.amazon.com/systems-manager/latest/userguide/mw-cli-tutorial-targets-examples.html)
"""
return pulumi.get(self, "targets")
@property
@pulumi.getter(name="windowId")
def window_id(self) -> pulumi.Output[str]:
"""
The Id of the maintenance window to register the target with.
"""
return pulumi.get(self, "window_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"[email protected]"
] | |
c27d32cdd1b7b026779339fc68a7830871f34535 | 4dd811c2595a990cb21327afe7a2dfd54ba9b52f | /Open-CV_Basics/threshold_example.py | 3b3018af5e505a219059d82806e1a161262e2bc4 | [] | no_license | mitesh55/Deep-Learning | 40842af8b8f1ea3c041fa4de2e76d068f554c1a4 | 0a7c0305edb027acfaad38e7abe52808260cede9 | refs/heads/master | 2023-07-25T14:31:14.285489 | 2021-09-09T13:46:31 | 2021-09-09T13:46:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 24 09:09:57 2020
@author: pavankunchala
"""
import cv2
from matplotlib import pyplot as plt
def show_img_with_matplotlib(color_img, title, pos):
"""Shows an image using matplotlib capabilities"""
# Convert BGR image to RGB
img_RGB = color_img[:, :, ::-1]
ax = plt.subplot(3, 3, pos)
plt.imshow(img_RGB)
plt.title(title)
plt.axis('off')
# Create the dimensions of the figure and set title and color:
fig = plt.figure(figsize=(9, 9))
plt.suptitle("Thresholding example", fontsize=14, fontweight='bold')
fig.patch.set_facecolor('silver')
# Load the image and convert it to grayscale:
image = cv2.imread('img.png')
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Plot the grayscale image:
show_img_with_matplotlib(cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR), "img", 1)
# Apply cv2.threshold() with different thresholding values:
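# Each call returns a (threshold_used, binary_image) tuple; with THRESH_BINARY,
# pixels brighter than the threshold become 255 (white) and the rest become 0.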
ret1, thresh1 = cv2.threshold(gray_image, 60, 255, cv2.THRESH_BINARY)
ret2, thresh2 = cv2.threshold(gray_image, 70, 255, cv2.THRESH_BINARY)
ret3, thresh3 = cv2.threshold(gray_image, 80, 255, cv2.THRESH_BINARY)
ret4, thresh4 = cv2.threshold(gray_image, 90, 255, cv2.THRESH_BINARY)
ret5, thresh5 = cv2.threshold(gray_image, 100, 255, cv2.THRESH_BINARY)
ret6, thresh6 = cv2.threshold(gray_image, 110, 255, cv2.THRESH_BINARY)
ret7, thresh7 = cv2.threshold(gray_image, 120, 255, cv2.THRESH_BINARY)
ret8, thresh8 = cv2.threshold(gray_image, 130, 255, cv2.THRESH_BINARY)
# Plot all the thresholded images:
show_img_with_matplotlib(cv2.cvtColor(thresh1, cv2.COLOR_GRAY2BGR), "threshold = 60", 2)
show_img_with_matplotlib(cv2.cvtColor(thresh2, cv2.COLOR_GRAY2BGR), "threshold = 70", 3)
show_img_with_matplotlib(cv2.cvtColor(thresh3, cv2.COLOR_GRAY2BGR), "threshold = 80", 4)
show_img_with_matplotlib(cv2.cvtColor(thresh4, cv2.COLOR_GRAY2BGR), "threshold = 90", 5)
show_img_with_matplotlib(cv2.cvtColor(thresh5, cv2.COLOR_GRAY2BGR), "threshold = 100", 6)
show_img_with_matplotlib(cv2.cvtColor(thresh6, cv2.COLOR_GRAY2BGR), "threshold = 110", 7)
show_img_with_matplotlib(cv2.cvtColor(thresh7, cv2.COLOR_GRAY2BGR), "threshold = 120", 8)
show_img_with_matplotlib(cv2.cvtColor(thresh8, cv2.COLOR_GRAY2BGR), "threshold = 130", 9)
# Show the Figure:
plt.show() | [
"[email protected]"
] | |
500729c399198a7f347c002ba8524217d5ad7c11 | 4324d19af69080f45ff60b733c940f7dc1aa6dae | /google-ads-python/google/ads/google_ads/v0/proto/services/ad_group_bid_modifier_service_pb2_grpc.py | 323a703193e94e53842992d5f9f98450e7ec0307 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | ljborton/Forked_Work | cc8a3813c146ea4547aca9caeb03e649bbdb9076 | 7aaf67af8d9f86f9dc0530a1ad23951bcb535c92 | refs/heads/master | 2023-07-19T22:26:48.085129 | 2019-11-27T02:53:51 | 2019-11-27T02:53:51 | 224,321,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,600 | py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v0.proto.resources import ad_group_bid_modifier_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_ad__group__bid__modifier__pb2
from google.ads.google_ads.v0.proto.services import ad_group_bid_modifier_service_pb2 as google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2
class AdGroupBidModifierServiceStub(object):
"""Service to manage ad group bid modifiers.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetAdGroupBidModifier = channel.unary_unary(
'/google.ads.googleads.v0.services.AdGroupBidModifierService/GetAdGroupBidModifier',
request_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2.GetAdGroupBidModifierRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_ad__group__bid__modifier__pb2.AdGroupBidModifier.FromString,
)
self.MutateAdGroupBidModifiers = channel.unary_unary(
'/google.ads.googleads.v0.services.AdGroupBidModifierService/MutateAdGroupBidModifiers',
request_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2.MutateAdGroupBidModifiersRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2.MutateAdGroupBidModifiersResponse.FromString,
)
class AdGroupBidModifierServiceServicer(object):
"""Service to manage ad group bid modifiers.
"""
def GetAdGroupBidModifier(self, request, context):
"""Returns the requested ad group bid modifier in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateAdGroupBidModifiers(self, request, context):
"""Creates, updates, or removes ad group bid modifiers.
Operation statuses are returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_AdGroupBidModifierServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetAdGroupBidModifier': grpc.unary_unary_rpc_method_handler(
servicer.GetAdGroupBidModifier,
request_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2.GetAdGroupBidModifierRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_resources_dot_ad__group__bid__modifier__pb2.AdGroupBidModifier.SerializeToString,
),
'MutateAdGroupBidModifiers': grpc.unary_unary_rpc_method_handler(
servicer.MutateAdGroupBidModifiers,
request_deserializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2.MutateAdGroupBidModifiersRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v0_dot_proto_dot_services_dot_ad__group__bid__modifier__service__pb2.MutateAdGroupBidModifiersResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v0.services.AdGroupBidModifierService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| [
"[email protected]"
] | |
c199a8c9d14036ef6e195fc868e7c99f7939151e | 29ac2c1477b2972820dd024ee443b8626e3224cf | /gallery/migrations/0002_auto_20210608_0440.py | 87afdaaf43bc510f256b14412bc81c86310e8178 | [] | no_license | devArist/school_project3 | 5cdf1955e60ff727a64cf7cb50987da70149b0b8 | c153339cf55a87cb81b331ce7fbd43615a0435aa | refs/heads/main | 2023-05-09T19:48:32.092513 | 2021-06-08T10:07:25 | 2021-06-08T10:07:25 | 374,965,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | # Generated by Django 3.2.3 on 2021-06-08 04:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gallery', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='photo',
name='formats',
field=models.CharField(max_length=50, null=True, verbose_name='format'),
),
migrations.AddField(
model_name='video',
name='duration',
field=models.DurationField(null=True),
),
]
| [
"[email protected]"
] | |
a68d760a069c84b3e5c6d1a92d06ab3f2bd4fd43 | 950ce2ac7aa569c7e82bd135492c0ff94ef97c74 | /test/myapp/myapp/spiders/item_xpath.py | 4f8a0120bf12e339747335c62122f88055842a82 | [] | no_license | ShichaoMa/webWalker | 74ea7f38b74272d951e8bccea730a40f7f0c72b4 | 8c085d6f385f85f193b0992b1148f165652e3a98 | refs/heads/master | 2021-01-12T16:50:36.331337 | 2017-12-08T11:26:46 | 2017-12-08T11:26:46 | 71,446,722 | 62 | 18 | null | 2017-12-08T11:26:47 | 2016-10-20T09:27:08 | Python | UTF-8 | Python | false | false | 236 | py | # -*- coding:utf-8 -*-
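# Per-site XPath expressions the spider uses to pull item-page links out of
# each listing page.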
ITEM_XPATH = {
"bluefly": [
'//ul[@class="mz-productlist-list mz-l-tiles"]/li//a[@class="mz-productlisting-title"]/@href',
],
"douban": [
"//a[@class='product-photo']/@href",
]
}
| [
"[email protected]"
] | |
d0a580aaa5edce4979a869971401b95f3b920d70 | 1b862f34c125ce200244dd79e4fda4b5b605ce2e | .history/ML_EO_20210607233236.py | 524991d44904097561ba54152a436c881885c12f | [] | no_license | edwino26/CoreImages | 26085a49cf1cb79442ae563a88354b2fdceace87 | 6bf6e68cac8ab36c87b1e6ea702bfe6882b0f40e | refs/heads/master | 2023-06-22T12:53:37.344895 | 2021-07-21T04:31:44 | 2021-07-21T04:31:44 | 309,553,247 | 0 | 4 | null | 2021-04-29T23:23:15 | 2020-11-03T02:45:07 | Lasso | UTF-8 | Python | false | false | 25,622 | py | # # Reads the xlsx files produced by the image processing
# and the information obtained from the logs of each well
# %%
import numpy as np
import pandas as pd
import os
import os.path
import matplotlib.pyplot as plt # GRAPHS
from mlxtend.plotting import scatterplotmatrix
import glob
import seaborn as sns
import missingno as msno
import math
import itertools
import smogn
import pickle
from scipy import interpolate
#Clustering packages
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import make_regression
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import SCORERS
# ===============================================
from ML_EO_GridSearch import *
GRIDSEARCH = 'on' #GridSearch Option
#%% ====================== Main Dataframe Computation =====================
def round_depth(number):
return round(number * 4, 0) / 4 #Use 2 to round to closest 0.5, 4 to 0.25
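# e.g. round_depth(912.37) -> 912.25 and round_depth(912.4) -> 912.5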
# Data load
T2 = pd.read_excel('./Excel_Files/T2_rs.xls',sheet_name='T2_data')
T6 = pd.read_excel('./Excel_Files/T6_rs.xls',sheet_name='T6_data')
U18 = pd.read_excel('./Excel_Files/U18_rs.xls',sheet_name='U18_data')
T2_img = pd.read_excel('./Excel_Files/Processed_Images_T2.xls',sheet_name='T2').rename(columns={"DEPTH": "DEPT"})
T6_img = pd.read_excel('./Excel_Files/Processed_Images_T6.xls',sheet_name='T6').rename(columns={"DEPTH": "DEPT"})
U18_img = pd.read_excel('./Excel_Files/Processed_Images_U18.xls',sheet_name='U18').rename(columns={"DEPTH": "DEPT"})
# %%
# Conditioning before joining well dataset pairs
T2['DEPT'] = T2['DEPTH'].astype(float).apply(lambda x: round_depth(x))
T2_img['DEPT'] = T2_img['DEPT'].astype(float).apply(lambda x: round_depth(x))
T6['DEPT'] = T6['DEPTH'].astype(float).apply(lambda x: round_depth(x))
T6_img['DEPT'] = T6_img['DEPT'].astype(float).apply(lambda x: round_depth(x))
U18['DEPT'] = U18['TDEP'].astype(float).apply(lambda x: round_depth(x))
U18_img['DEPT'] = U18_img['DEPT'].astype(float).apply(lambda x: round_depth(x))
# U18 had too many images compared to other 2 wells. Take a smaller set to have a more balanced dataset
U18 = U18[U18['DEPT']>771]
U18 = U18[U18['DEPT']<925]
# Join only depths present at both datasets within a pair
T2 = T2_img.merge(T2, on='DEPT', how='inner').dropna()
T6 = T6_img.merge(T6, on='DEPT', how='inner').dropna()
U18 = U18_img.merge(U18, on='DEPT', how='inner').dropna()
# Specific curves to be used in ML algorithm
logset = ['DEPT', 'GR_EDTC', 'RHOZ', 'AT90', 'DTCO', 'NPHI', 'WELL', 'GRAY']
# Scale images: bright and dim points relate to the presence of oil, but the images
# were acquired by different labs at different times, using different equipment, etc.
scaler = MinMaxScaler()
T2['GRAY'] = 255*scaler.fit_transform(T2['GRAY'].values.reshape(-1,1))
T6['GRAY'] = 255*scaler.fit_transform(T6['GRAY'].values.reshape(-1,1))
U18['GRAY'] = 255*scaler.fit_transform(U18['GRAY'].values.reshape(-1,1))
# %%
# ---------- Main Data Frame Structuring --------
#Train on T6 and U18
df = T2[logset].append(T6[logset]).append(U18[logset]).rename(columns={"GR_EDTC": "GR", "AT90": "logRT", "RHOZ": "RHOB", "WELL": "Well"}).set_index('Well')
#df = T2[logset].append(T6[logset]).rename(columns={"GR_EDTC": "GR", "AT90": "logRT", "RHOZ": "RHOB", "WELL": "Well"}).set_index('Well')
df['logRT'] = df['logRT'].apply(lambda x: math.log10(x))
df.reset_index(inplace=True)
df.to_excel('./ML_Results/df.xlsx','Original')
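# Binary "pay" flag: UV gray brighter than 170 (out of 255) is treated as oil-stained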
df['Pay'] = df['GRAY'].apply(lambda x: 1 if x> 170 else 0)
data = df.copy()
data= data[["GR", "RHOB", "logRT", "DTCO", "NPHI", "GRAY", "Pay","DEPT", "Well"]]
data.head(100)
#train, test = train_test_split(data, test_size=0.3, random_state=42)
#train = data[(data['Well']!='T6') & (data['GRAY']>10)].copy()
# train = data[(data['Well']!='T2')]
# test = data[data['Well']=='T2']
#test = data[(data['DEPT']>3670) &(data['DEPT']<3720)]
#train = data[~data.index.isin(test.index)]
train, test = train_test_split(data, test_size=0.2, random_state=42)
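# 80/20 random split; the commented-out variants above instead hold out a whole
# well or a depth window as the test set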
train.reset_index(inplace=True, drop=True)
test.reset_index(inplace=True, drop=True)
# %% Pre-processing: Data Balancing
# Original data is imbalanced as there's more dark than bright in all UV photos
# SMOGN under-samples common target ranges and synthesizes samples in rare ones to re-balance the dataset
# Reference: http://proceedings.mlr.press/v74/branco17a/branco17a.pdf
## specify phi relevance values
balance = 0
if balance == 1:
train_smogn = smogn.smoter(
data = train, ## pandas dataframe
y = 'GRAY', ## string ('target header name')
pert = 0.01,
k = 3,
rel_thres = 0.5, ## relevance threshold considered rare (pos real)
rel_method = "auto", ## relevance method ("auto" or "manual")
rel_xtrm_type = "both", ## distribution focus ("high", "low", "both")
rel_coef = 0.5, ## coefficient for box plot (pos real)
rel_ctrl_pts_rg = None
)
## Check changes in target variable distribution
smogn.box_plot_stats(train['GRAY'])['stats']
smogn.box_plot_stats(train_smogn['GRAY'])['stats']
## plot y distribution
sns.kdeplot(train['GRAY'], label = "Original")
sns.kdeplot(train_smogn['GRAY'], label = "Modified")
del train
train = train_smogn.copy()
train.to_excel('./ML_Results/train_smogn.xlsx','Smogn')
#-------------------------------
# %% ========================= Machine Learning: PROCESSING ===============================
augmentation = 0
## Attempt Augmentation
if augmentation ==1:
    train.sort_values('DEPT', inplace=True)
    # shift the GRAY target up/down by one and two samples along depth
    aa = train.copy()
    s1 = pd.Series([0])
    aa.GRAY = aa.GRAY[1:].append(s1).values
    ab = train.copy()
    ab.GRAY = s1.append(ab.GRAY[0:-1]).values
    s2 = pd.Series([0, 0])
    ac = train.copy()
    ac.GRAY = train.GRAY[2:].append(s2).values  # was aa.GRAY, an already-shifted series (a 3-sample shift)
    ad = train.copy()
    ad.GRAY = s2.append(train.GRAY[0:-2]).values  # was ab.GRAY, an already-shifted series
    train = train.append(aa).append(ab).append(ac).append(ad)
scaler = StandardScaler()
print("Test Dataset Shape :::::: ", test.shape)
print("Train Dataset Shape :::::: ", train.shape)
train.to_excel('./ML_Results/train.xlsx','Train')
test.to_excel('./ML_Results/test.xlsx','Test')
option = 'Gray' #or 'Pay'
if option == 'Gray':
target_col = 5
else:
target_col = 6
X = train.iloc[:, [0,1,2,3,4]]
y = train.iloc[:, [target_col]] #5 is GRAY, #6 is Pay
X_test = test.iloc[:, [0,1,2,3,4]]
y_test = test.iloc[:, [target_col]]
# Scaling
#Find scaling parameters based on training data only
scaler = StandardScaler()
scaler.fit(X)
print(scaler.mean_)
X = scaler.transform(X)
X_test = scaler.transform(X_test)
# %%
plot_descriptive = 1
if plot_descriptive == 1:
# ===================== Descriptive Statistics and Plots =====================
print(df.groupby(['Well']).median())
print(df.groupby(['Well']).count())
print(df.groupby(['Well']).min())
print(df.groupby(['Well']).max())
# Show GR distribution among wells
fig,ax = plt.subplots()
hatches = ('\\', '//', '..') # fill pattern
alpha_v = 0.9
for (i, d),hatch in zip(df.groupby('Well'), hatches):
d['GR'].hist(alpha=alpha_v, ax=ax, label=i, hatch=hatch)
alpha_v -= 0.3
ax.legend()
#Show how the gray scale varies among each photo
fig,ax = plt.subplots()
hatches = ('\\', '//', '..') # fill pattern
alpha_v = 0.9
for (i, d),hatch in zip(df.groupby('Well'), hatches):
d['GRAY'].hist(alpha=alpha_v, ax=ax, label=i, hatch=hatch)
alpha_v -= 0.3
ax.legend()
# Expected correlations between variables
fig, axss = plt.subplots(2, 2, figsize=(5, 5))
axss[0, 0].hist(df['GRAY'])
axss[1, 0].plot(df['GR'], df['GRAY'], linestyle='None', markersize=4, marker='o')
axss[0, 1].plot(df['RHOB'], df['GRAY'], linestyle='None', markersize=4, marker='o')
axss[1, 1].hist2d(df['logRT'], df['GRAY'])
plt.show()
# Matrix Plot
# variables= ['GR', 'RHOB', 'logRT', 'DTCO', 'NPHI', 'GRAY']
# fig, axes = scatterplotmatrix(df[df['Well']=='T2'].drop(['Well', 'DEPT'], axis=1).values, figsize=(8, 6), alpha=0.5)
# fig, axes = scatterplotmatrix(df[df['Well']=='T6'].drop(['Well', 'DEPT'], axis=1).values, fig_axes=(fig, axes), alpha=0.5, names=variables)
# #fig, axes = scatterplotmatrix(df[df['Well']=='U18'].drop(['Well', 'DEPT'], axis=1).values, fig_axes=(fig, axes), alpha=0.5, names=variables)
# plt.tight_layout()
# plt.show()
#With SB
#sns.set_theme(style="ticks")
#sns.pairplot(train, kind="kde")
# Note: scaling the target is neither necessary nor advised
# ====================================================================
# ================ Machine Learning: MODELING =======================
# ====================================================================
print(df.shape)
Methods = ['RandomForest', 'Lasso', 'ElasticNet', 'Ridge', 'SVR', 'GradientBoosting', 'MLP']
error = pd.DataFrame(index=['RMSE', 'MSE'], columns=Methods)
# %% ------------------- Averaging: Random Forest Regressor ---------------------
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
if GRIDSEARCH == 'on':
rgr = RandomForestRegressorGS(X, np.ravel(y))
rgr.fit(X, np.ravel(y))
else:
rgr = RandomForestRegressor(n_estimators=100, criterion='mse')
rgr.fit(X, np.ravel(y))
print("Relative importance of GR, RHOB, logRt, DTCO, NPHI", rgr.feature_importances_)
y_pred_train = rgr.predict(X)
y_pred_test = rgr.predict(X_test)
mse = mean_squared_error(y_test, y_pred_test)
rmse_train = mean_squared_error(y, y_pred_train, squared=False)
rmse = mean_squared_error(y_test, y_pred_test, squared=False)
fig, axs = plt.subplots(1, 2, constrained_layout=True, figsize=(8,4))
axs[0].set_title('Train '+option)
axs[0].plot(y, y, 'blue'); axs[0].set_xlabel('True '+option); axs[0].set_ylabel('Predicted '+option)
axs[0].plot(y, y_pred_train, 'ko')
axs[0].text(-0.3, 0.08, 'RMSE = '+str(round(rmse_train,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='black', fontsize=10)
axs[1].plot(y_test, y_test, 'blue')
axs[1].plot(y_test, y_pred_test, 'go')
axs[1].set_title('Test '+option)
axs[1].text(1.2, 0.08, 'RMSE = '+str(round(rmse,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='green', fontsize=10)
axs[1].plot(y, y, 'blue'); axs[1].set_xlabel('True '+option);
plt.show()
ct = 0  # index into Methods; each subsequent model block increments it
method = Methods[ct]
y_pred_train = pd.DataFrame(y_pred_train, columns=['y_pred_train'], index=y.index)
y_pred_test = pd.DataFrame(y_pred_test, columns=['y_pred_test'], index=y_test.index)
train_all = train.copy()
train_all[method] = y_pred_train
test_all = test.copy()
test_all[method] = y_pred_test
error.loc['MSE', method] = mse.round()
error.loc['RMSE', method] = rmse.round()
fig.savefig('./ML_Results/'+str(ct)+"_"+method+'.jpg')
#Save Model
pkl_filename = method+".pkl"
with open('./ML_Results/models/'+pkl_filename, 'wb') as file:
pickle.dump(rgr, file)
#Load Model and Validate holdout Well
with open('./ML_Results/models/'+pkl_filename, 'rb') as file:
model = pickle.load(file)
print(error)
#----------------------- End Random Forest -----------------------------
# %% -------------- Linear Model: LASSO, L1 regularization --------------
if GRIDSEARCH == 'on':
rgr = lassoGS(X, y)
rgr.fit(X, y)
else:
rgr = linear_model.Lasso(alpha=0.3)
rgr.fit(X, y)
y_pred_train = rgr.predict(X)
y_pred_test = rgr.predict(X_test)
print(rgr.coef_, rgr.intercept_)
mse = mean_squared_error(y_test, y_pred_test)
rmse_train = mean_squared_error(y, y_pred_train, squared=False)
rmse = mean_squared_error(y_test, y_pred_test, squared=False)
fig, axs = plt.subplots(1, 2, constrained_layout=True, figsize=(8,4))
axs[0].set_title('Train '+option)
axs[0].plot(y, y, 'blue'); axs[0].set_xlabel('True '+option); axs[0].set_ylabel('Predicted '+option)
axs[0].plot(y, y_pred_train, 'ko')
axs[0].text(-0.3, 0.08, 'RMSE = '+str(round(rmse_train,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='black', fontsize=10)
axs[1].plot(y_test, y_test, 'blue')
axs[1].plot(y_test, y_pred_test, 'go')
axs[1].set_title('Test '+option)
axs[1].text(1.2, 0.08, 'RMSE = '+str(round(rmse,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='green', fontsize=10)
axs[1].plot(y, y, 'blue'); axs[1].set_xlabel('True '+option);
plt.show()
ct += 1
method = Methods[ct]
y_pred_train = pd.DataFrame(y_pred_train, columns=['y_pred_train'], index=y.index)
y_pred_test = pd.DataFrame(y_pred_test, columns=['y_pred_test'], index=y_test.index)
train_all[method] = y_pred_train
test_all[method] = y_pred_test
error.loc['MSE', method] = mse.round()
error.loc['RMSE', method] = rmse.round()
fig.savefig('./ML_Results/'+str(ct)+"_"+method+'.jpg')
#Save Model
pkl_filename = method+".pkl"
with open('./ML_Results/models/'+pkl_filename, 'wb') as file:
pickle.dump(rgr, file)
#Load Model and Validate holdout Well
with open('./ML_Results/models/'+pkl_filename, 'rb') as file:
model = pickle.load(file)
print(error)
#-------------------------------- End Lasso --------------------------
# %%
# %% -------------- Linear Model: ElasticNet, L1+L2 regularization --------------
if GRIDSEARCH == 'on':
rgr = ElasticNetGS(X, y)
rgr.fit(X, y)
else:
rgr = linear_model.ElasticNet(alpha=0.5, l1_ratio=0.1, random_state = 5, selection='random')
rgr.fit(X, y)
y_pred_train = rgr.predict(X)
y_pred_test = rgr.predict(X_test)
print(rgr.coef_, rgr.intercept_)
mse = mean_squared_error(y_test, y_pred_test)
rmse_train = mean_squared_error(y, y_pred_train, squared=False)
rmse = mean_squared_error(y_test, y_pred_test, squared=False)
fig, axs = plt.subplots(1, 2, constrained_layout=True, figsize=(8,4))
axs[0].set_title('Train '+option)
axs[0].plot(y, y, 'blue'); axs[0].set_xlabel('True '+option); axs[0].set_ylabel('Predicted '+option)
axs[0].plot(y, y_pred_train, 'ko')
axs[0].text(-0.3, 0.08, 'RMSE = '+str(round(rmse_train,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='black', fontsize=10)
axs[1].plot(y_test, y_test, 'blue')
axs[1].plot(y_test, y_pred_test, 'go')
axs[1].set_title('Test '+option)
axs[1].text(1.2, 0.08, 'RMSE = '+str(round(rmse,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='green', fontsize=10)
axs[1].plot(y, y, 'blue'); axs[1].set_xlabel('True '+option);
plt.show()
ct +=1
method = Methods[ct]
y_pred_train = pd.DataFrame(y_pred_train, columns=['y_pred_train'], index=y.index)
y_pred_test = pd.DataFrame(y_pred_test, columns=['y_pred_test'], index=y_test.index)
train_all[method] = y_pred_train
test_all[method] = y_pred_test
error.loc['MSE', method] = mse.round()
error.loc['RMSE', method] = rmse.round()
fig.savefig('./ML_Results/'+str(ct)+"_"+method+'.jpg')
#Save Model
pkl_filename = method+".pkl"
with open('./ML_Results/models/'+pkl_filename, 'wb') as file:
pickle.dump(rgr, file)
#Load Model and Validate holdout Well
with open('./ML_Results/models/'+pkl_filename, 'rb') as file:
model = pickle.load(file)
print(error)
#-------------------------------- End ElasticNet --------------------------
#--------------------------------------------------------------------------
# %% ------------------- Linear Model: Ridge Regression -------------------
if GRIDSEARCH == 'on':
rgr = RidgeGS(X, y)
rgr.fit(X, y)
else:
rgr = linear_model.Ridge(alpha=0.5, solver='auto')
rgr.fit(X, y)
y_pred_train = rgr.predict(X)
y_pred_test = rgr.predict(X_test)
print(rgr.coef_, rgr.intercept_)
mse = mean_squared_error(y_test, y_pred_test)
rmse_train = mean_squared_error(y, y_pred_train, squared=False)
rmse = mean_squared_error(y_test, y_pred_test, squared=False)
fig, axs = plt.subplots(1, 2, constrained_layout=True, figsize=(8,4))
axs[0].set_title('Train '+option)
axs[0].plot(y, y, 'blue'); axs[0].set_xlabel('True '+option); axs[0].set_ylabel('Predicted '+option)
axs[0].plot(y, y_pred_train, 'ko')
axs[0].text(-0.3, 0.08, 'RMSE = '+str(round(rmse_train,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='black', fontsize=10)
axs[1].plot(y_test, y_test, 'blue')
axs[1].plot(y_test, y_pred_test, 'go')
axs[1].set_title('Test '+option)
axs[1].text(1.2, 0.08, 'RMSE = '+str(round(rmse,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='green', fontsize=10)
axs[1].plot(y, y, 'blue'); axs[1].set_xlabel('True '+option);
plt.show()
ct +=1
method = Methods[ct]
y_pred_train = pd.DataFrame(y_pred_train, columns=['y_pred_train'], index=y.index)
y_pred_test = pd.DataFrame(y_pred_test, columns=['y_pred_test'], index=y_test.index)
train_all[method] = y_pred_train
test_all[method] = y_pred_test
error.loc['MSE', method] = mse.round()
error.loc['RMSE', method] = rmse.round()
fig.savefig('./ML_Results/'+str(ct)+"_"+method+'.jpg')
#Save Model
pkl_filename = method+".pkl"
with open('./ML_Results/models/'+pkl_filename, 'wb') as file:
pickle.dump(rgr, file)
#Load Model and Validate holdout Well
with open('./ML_Results/models/'+pkl_filename, 'rb') as file:
model = pickle.load(file)
print(error)
#--------------------------------- End Ridge ------------------------------
#--------------------------------------------------------------------------
# %% ------------------- Support Vector Machines: SVR ---------------------
# https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html
if GRIDSEARCH == 'on':
rgr = SVRGS(X, np.ravel(y))
rgr.fit(X, np.ravel(y))
else:
rgr = SVR(C= 150, epsilon=0.2)
rgr.fit(X, np.ravel(y))
y_pred_train = rgr.predict(X)
y_pred_test = rgr.predict(X_test)
mse = mean_squared_error(y_test, y_pred_test)
rmse_train = mean_squared_error(y, y_pred_train, squared=False)
rmse = mean_squared_error(y_test, y_pred_test, squared=False)
fig, axs = plt.subplots(1, 2, constrained_layout=True, figsize=(8,4))
axs[0].set_title('Train '+option)
axs[0].plot(y, y, 'blue'); axs[0].set_xlabel('True '+option); axs[0].set_ylabel('Predicted '+option)
axs[0].plot(y, y_pred_train, 'ko')
axs[0].text(-0.3, 0.08, 'RMSE = '+str(round(rmse_train,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='black', fontsize=10)
axs[1].plot(y_test, y_test, 'blue')
axs[1].plot(y_test, y_pred_test, 'go')
axs[1].set_title('Test '+option)
axs[1].text(1.2, 0.08, 'RMSE = '+str(round(rmse,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='green', fontsize=10)
axs[1].plot(y, y, 'blue'); axs[1].set_xlabel('True '+option);
plt.show()
ct +=1
method = Methods[ct]
y_pred_train = pd.DataFrame(y_pred_train, columns=['y_pred_train'], index=y.index)
y_pred_test = pd.DataFrame(y_pred_test, columns=['y_pred_test'], index=y_test.index)
train_all[method] = y_pred_train
test_all[method] = y_pred_test
error.loc['MSE', method] = mse.round()
error.loc['RMSE', method] = rmse.round()
fig.savefig('./ML_Results/'+str(ct)+"_"+method+'.jpg')
#Save Model
pkl_filename = method+".pkl"
with open('./ML_Results/models/'+pkl_filename, 'wb') as file:
pickle.dump(rgr, file)
#Load Model and Validate holdout Well
with open('./ML_Results/models/'+pkl_filename, 'rb') as file:
model = pickle.load(file)
print(error)
#--------------------------------- End SVR --------------------------------
#-o-o-o-o-o-o-o-o-o-o-o-o-o- ENSEMBLE METHODS -o-o-o-o-o-o-o-o-o-o-o-o-o-o
# Ref: https://scikit-learn.org/stable/modules/ensemble.html
# %% ------------------- Boosting: Gradient Tree Boosting ---------------------
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html#sklearn.ensemble.GradientBoostingRegressor
GRIDSEARCH = 'off'  # disable grid search for this block (the original '==' was a no-op comparison)
if GRIDSEARCH == 'on':
rgr = GradientBoostingRegressorGS(X, np.ravel(y))
rgr.fit(X, np.ravel(y))
else:
rgr = GradientBoostingRegressor(n_estimators=100, learning_rate=0.01, alpha=0.336)
rgr.fit(X, np.ravel(y))
y_pred_train = rgr.predict(X)
y_pred_test = rgr.predict(X_test)
mse = mean_squared_error(y_test, y_pred_test)
rmse_train = mean_squared_error(y, y_pred_train, squared=False)
rmse = mean_squared_error(y_test, y_pred_test, squared=False)
fig, axs = plt.subplots(1, 2, constrained_layout=True, figsize=(8,4))
axs[0].set_title('Train '+option)
axs[0].plot(y, y, 'blue'); axs[0].set_xlabel('True '+option); axs[0].set_ylabel('Predicted '+option)
axs[0].plot(y, y_pred_train, 'ko')
axs[0].text(-0.3, 0.08, 'RMSE = '+str(round(rmse_train,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='black', fontsize=10)
axs[1].plot(y_test, y_test, 'blue')
axs[1].plot(y_test, y_pred_test, 'go')
axs[1].set_title('Test '+option)
axs[1].text(1.2, 0.08, 'RMSE = '+str(round(rmse,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='green', fontsize=10)
axs[1].plot(y, y, 'blue'); axs[1].set_xlabel('True '+option);
plt.show()
ct +=1
method = Methods[ct]
y_pred_train = pd.DataFrame(y_pred_train, columns=['y_pred_train'], index=y.index)
y_pred_test = pd.DataFrame(y_pred_test, columns=['y_pred_test'], index=y_test.index)
train_all[method] = y_pred_train
test_all[method] = y_pred_test
error.loc['MSE', method] = mse.round()
error.loc['RMSE', method] = rmse.round()
fig.savefig('./ML_Results/'+str(ct)+"_"+method+'.jpg')
#Save Model
pkl_filename = method+".pkl"
with open('./ML_Results/models/'+pkl_filename, 'wb') as file:
pickle.dump(rgr, file)
#Load Model and Validate holdout Well
with open('./ML_Results/models/'+pkl_filename, 'rb') as file:
model = pickle.load(file)
print(error)
#----------------------- End Gradient Boosting -----------------------------
# %% ----------------------------------------------- Neural Network ----------------------------------------------------
# https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html#sklearn.neural_network.MLPRegressor
# Default: solver: Adam, activation: relu, learning rate: constant given by learning_rate_init, batch_size='auto'
# hidden_layer_sizes: tuple, length = n_layers - 2, default=(100,),which means one hidden layer with 100 neurons
# hidden_layer_sizes (30, 30, 30) means 3 hidden layers with 30 neurons each.
# Use e.g. [x for x in itertools.product((10,50,100,30),repeat=4)] to generate all possible 4-hidden layer combinations
GRIDSEARCH = 'off'  # disable grid search for this block (the original '==' was a no-op comparison)
if GRIDSEARCH == 'on':
rgr = MLPRegressorGS(X, np.ravel(y))
rgr.fit(X, np.ravel(y))
else:
rgr = MLPRegressor(hidden_layer_sizes=(10, 50, 100), alpha=0.66333, batch_size='auto', learning_rate_init=0.01)
rgr.fit(X, np.ravel(y))
y_pred_train = rgr.predict(X)
y_pred_test = rgr.predict(X_test)
mse = mean_squared_error(y_test, y_pred_test)
rmse_train = mean_squared_error(y, y_pred_train, squared=False)
rmse = mean_squared_error(y_test, y_pred_test, squared=False)
fig, axs = plt.subplots(1, 2, constrained_layout=True, figsize=(8,4))
axs[0].set_title('Train '+option)
axs[0].plot(y, y, 'blue'); axs[0].set_xlabel('True '+option); axs[0].set_ylabel('Predicted '+option)
axs[0].plot(y, y_pred_train, 'ko')
axs[0].text(-0.3, 0.08, 'RMSE = '+str(round(rmse_train,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='black', fontsize=10)
axs[1].plot(y_test, y_test, 'blue')
axs[1].plot(y_test, y_pred_test, 'go')
axs[1].set_title('Test '+option)
axs[1].text(1.2, 0.08, 'RMSE = '+str(round(rmse,2)), verticalalignment='bottom', horizontalalignment='right', transform=axs[1].transAxes,color='green', fontsize=10)
axs[1].plot(y, y, 'blue'); axs[1].set_xlabel('True '+option);
plt.show()
ct +=1
method = Methods[ct]
y_pred_train = pd.DataFrame(y_pred_train, columns=['y_pred_train'], index=y.index)
y_pred_test = pd.DataFrame(y_pred_test, columns=['y_pred_test'], index=y_test.index)
train_all[method] = y_pred_train
test_all[method] = y_pred_test
error.loc['MSE', method] = mse.round()
error.loc['RMSE', method] = rmse.round()
fig.savefig('./ML_Results/'+str(ct)+"_"+method+'.jpg')
#Save Model
pkl_filename = method+".pkl"
with open('./ML_Results/models/'+pkl_filename, 'wb') as file:
pickle.dump(rgr, file)
#Load Model
with open('./ML_Results/models/'+pkl_filename, 'rb') as file:
model = pickle.load(file)
print(error)
#----------------------- End Neural Network -----------------------------
#%%
#--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*
#--*--*--*--*--*--*--*--*--* Save Results *--*--*--*--*--*--*--*--*--*--*
#--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*
error.to_excel('./ML_Results/Errors.xlsx')
#train_all = train_all.merge(df['DEPT'], left_index=True, right_index=True)
train_all['Set'] = 'Train'
#test_all = test_all.merge(df['DEPT'], left_index=True, right_index=True)
test_all['Set'] = 'Test'
full_set = train_all.append(test_all)
full_set.to_excel('./ML_Results/Train_Test_Results.xls')
full_set[full_set['Well'] == 'T2'].sort_values('DEPT').to_excel('./ML_Results/T2_test/Results.xls')
#%%
| [
"[email protected]"
] | |
ea2dd8bf45e6bb9c2b6ff09afb422072ab1fe92b | 8566f9905a831b05dd79c0cb0d1cf99bd258a917 | /models/sml/sml_celeba_cactus.py | 0360822647d3a2a0a40c06b37deafe44bdd0e926 | [
"Apache-2.0"
] | permissive | emerld2011/MetaLearning-TF2.0 | 8e81db2a5489ffc2ccb4f21eb8202d1c50106844 | de852bd3b2ff46f8d390cebf561add3a166ee855 | refs/heads/main | 2023-08-14T18:21:23.539460 | 2021-09-17T14:42:22 | 2021-09-17T14:42:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | import tensorflow as tf
from models.sml.sml import SML
from networks.maml_umtra_networks import MiniImagenetModel
from databases import CelebADatabase, LFWDatabase
def run_celeba():
celeba_database = CelebADatabase()
base_model = tf.keras.applications.VGG19(weights='imagenet')
feature_model = tf.keras.models.Model(inputs=base_model.input, outputs=base_model.layers[24].output)
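    # Note: in tf.keras VGG19, layer index 24 appears to be one of the 4096-unit
    # fully connected layers, which matches feature_size=4096 below; it serves
    # as the feature extractor for unsupervised task construction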
sml = SML(
database=celeba_database,
target_database=LFWDatabase(),
network_cls=MiniImagenetModel,
n=5,
k=1,
k_val_ml=5,
k_val_val=15,
k_val_test=15,
k_test=15,
meta_batch_size=4,
num_steps_ml=5,
lr_inner_ml=0.05,
num_steps_validation=5,
save_after_iterations=15000,
meta_learning_rate=0.001,
n_clusters=500,
feature_model=feature_model,
# feature_size=288,
feature_size=4096,
input_shape=(224, 224, 3),
preprocess_function=tf.keras.applications.vgg19.preprocess_input,
log_train_images_after_iteration=1000,
number_of_tasks_val=100,
number_of_tasks_test=1000,
clip_gradients=True,
report_validation_frequency=250,
experiment_name='cactus_celeba_original3'
)
sml.train(iterations=60000)
sml.evaluate(iterations=50, seed=42)
if __name__ == '__main__':
run_celeba()
| [
"[email protected]"
] | |
6327cc15593e29d7b0f45312359af9350a81a2cc | 15d477b2bc7da4e1bddd6fa33f0768fcbd4c82c3 | /simple_3d_engine.py | 62feb8ad80ef2a33f85e02f37a7f238e444e751b | [] | no_license | gunny26/pygame | 364d4a221e2d11bd491190f97670b09123146ad7 | 1fd421195a2888c0588a49f5a043a1110eedcdbf | refs/heads/master | 2022-10-20T00:56:34.415095 | 2022-10-03T19:27:52 | 2022-10-03T19:27:52 | 7,414,604 | 5 | 11 | null | null | null | null | UTF-8 | Python | false | false | 6,778 | py | #!/usr/bin/python3
import sys
import math
import time
# non std
import pygame
class Vec2d:
def __init__(self, x, y):
self.x = x
self.y = y
class Vec3d:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
class Vec4d:
def __init__(self, x, y, z, w):
self.x = x
self.y = y
self.z = z
self.w = w
class Triangle:
def __init__(self, v1, v2, v3):
self.v1 = v1
self.v2 = v2
self.v3 = v3
class Mesh:
def __init__(self, triangles):
self.triangles = triangles
def __getitem__(self, index):
return self.triangles[index]
class Matrix4x4:
def __init__(self, vecs):
self.vecs = vecs
def __mul__(self, vec3d):
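        # Row-vector convention: vec3d is treated as [x, y, z, 1] and multiplied
        # by the 4x4 matrix; the resulting w component carries the perspective divide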
vec4d = Vec4d(*[
vec3d.x * self.vecs[0][0] + vec3d.y * self.vecs[1][0] + vec3d.z * self.vecs[2][0] + self.vecs[3][0],
vec3d.x * self.vecs[0][1] + vec3d.y * self.vecs[1][1] + vec3d.z * self.vecs[2][1] + self.vecs[3][1],
vec3d.x * self.vecs[0][2] + vec3d.y * self.vecs[1][2] + vec3d.z * self.vecs[2][2] + self.vecs[3][2],
vec3d.x * self.vecs[0][3] + vec3d.y * self.vecs[1][3] + vec3d.z * self.vecs[2][3] + self.vecs[3][3]
])
if vec4d.w != 0.0:
vec4d.x /= vec4d.w
vec4d.y /= vec4d.w
vec4d.z /= vec4d.w
return vec4d
def draw_triangle(surface, color, triangle):
pygame.draw.line(surface, color, (triangle.v1.x, triangle.v1.y), (triangle.v2.x, triangle.v2.y), 1)
pygame.draw.line(surface, color, (triangle.v2.x, triangle.v2.y), (triangle.v3.x, triangle.v3.y), 1)
pygame.draw.line(surface, color, (triangle.v3.x, triangle.v3.y), (triangle.v1.x, triangle.v1.y), 1)
if __name__=='__main__':
try:
surface = pygame.display.set_mode((600,600))
pygame.init()
# constants
FPS = 20
WHITE = (255, 255, 255) # white
clock = pygame.time.Clock()
width = surface.get_width()
height = surface.get_height()
        theta = 90.0 # field-of-view angle in degrees
z_near = 0.1
z_far = 1000.0
# some well known terms
        fov = 1 / math.tan(math.radians(theta) / 2) # field of view (tan expects radians)
aspect = width / height
q = z_far / (z_far - z_near) # z projection
# help:
# x = aspect * fov * x / z
# y = fov * y / z
# z = z * (q - q * z_near)
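        # together these map a camera-space point to coordinates roughly in
        # [-1, 1]; the divide by z happens inside Matrix4x4.__mul__ through
        # the homogeneous w component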
projection_matrix = Matrix4x4([
[ aspect * fov, 0 , 0 , 0],
[ 0 , fov, 0 , 0],
[ 0 , 0 , q , 1],
[ 0 , 0 , -z_near * q, 0]
])
# define cube, all triangle in same direction
cube = Mesh([
Triangle(Vec3d(0, 0, 0), Vec3d(0, 1, 0), Vec3d(1, 1, 0)), # south
Triangle(Vec3d(1, 1, 0), Vec3d(1, 0, 0), Vec3d(0, 0, 0)), # south
Triangle(Vec3d(1, 0, 0), Vec3d(1, 1, 0), Vec3d(1, 1, 1)), # east
Triangle(Vec3d(1, 1, 1), Vec3d(1, 0, 1), Vec3d(1, 0, 0)), # east
Triangle(Vec3d(0, 0, 1), Vec3d(0, 1, 1), Vec3d(0, 1, 0)), # west
Triangle(Vec3d(0, 1, 0), Vec3d(0, 0, 0), Vec3d(0, 0, 1)), # west
Triangle(Vec3d(1, 0, 1), Vec3d(1, 1, 1), Vec3d(0, 0, 1)), # north
Triangle(Vec3d(0, 0, 1), Vec3d(0, 1, 1), Vec3d(1, 1, 1)), # north
Triangle(Vec3d(0, 0, 1), Vec3d(0, 0, 0), Vec3d(1, 0, 0)), # bottom
Triangle(Vec3d(1, 0, 0), Vec3d(1, 0, 1), Vec3d(0, 0, 1)), # bottom
Triangle(Vec3d(0, 1, 0), Vec3d(0, 1, 1), Vec3d(1, 1, 1)), # top
Triangle(Vec3d(1, 1, 1), Vec3d(1, 1, 0), Vec3d(0, 1, 0)) # top
])
while True:
clock.tick(FPS)
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
sys.exit(0)
keyinput = pygame.key.get_pressed()
if keyinput is not None:
# print keyinput
if keyinput[pygame.K_ESCAPE]:
sys.exit(1)
surface.fill(0)
# create rotation matrix around x and z axis
f_theta = time.time()
rot_x = Matrix4x4([
[ 1, 0, 0, 0 ],
[ 0, math.cos(f_theta), -math.sin(f_theta), 0 ],
[ 0, math.sin(f_theta), math.cos(f_theta), 0 ],
[ 0, 0, 0, 1 ]
])
rot_y = Matrix4x4([
[ math.cos(f_theta), 0, math.sin(f_theta), 0 ],
[ 0, 1, 0, 0 ],
[ -math.sin(f_theta), 0, math.cos(f_theta), 0 ],
[ 0, 0, 0, 1 ]
])
rot_z = Matrix4x4([
[ math.cos(f_theta * 0.5), -math.sin(f_theta * 0.5), 0, 0 ],
[ math.sin(f_theta * 0.5), math.cos(f_theta * 0.5), 0, 0 ],
[ 0, 0, 1, 0 ],
[ 0, 0, 0, 1 ]
])
# do projection
for triangle in cube:
# rotate z
t_z = Triangle(*[
rot_z * triangle.v1,
rot_z * triangle.v2,
rot_z * triangle.v3
])
# rotate x
t_x = Triangle(*[
rot_x * t_z.v1,
rot_x * t_z.v2,
rot_x * t_z.v3
])
# translate into Z
t_t = Triangle(*[
Vec3d(t_x.v1.x, t_x.v1.y, t_x.v1.z + 3.0),
Vec3d(t_x.v2.x, t_x.v2.y, t_x.v2.z + 3.0),
Vec3d(t_x.v3.x, t_x.v3.y, t_x.v3.z + 3.0)
])
# project
t_p = Triangle(*[
projection_matrix * t_t.v1,
projection_matrix * t_t.v2,
projection_matrix * t_t.v3
])
# shift to positive values
t_p.v1.x += 1.0
t_p.v1.y += 1.0
t_p.v2.x += 1.0
t_p.v2.y += 1.0
t_p.v3.x += 1.0
t_p.v3.y += 1.0
# scale by half the screen
                t_p.v1.x *= width / 2
                t_p.v1.y *= height / 2
                t_p.v2.x *= width / 2
                t_p.v2.y *= height / 2
                t_p.v3.x *= width / 2
                t_p.v3.y *= height / 2
draw_triangle(surface, WHITE, t_p)
pygame.display.flip()
except KeyboardInterrupt:
print('shutting down')
| [
"[email protected]"
] | |
7a08c8c0f10a0f895db76c5b49933e2ec90cfd3c | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /8WvpPQto44PqNLSqJ_16.py | 1beb2118e0b5593eebf9a90ec62cd377057db150 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py |
def pad(message):
if len(message)<140:
if not len(message)%2:
message += ' l'
else:
message += 'l'
while len(message)<140:
message += 'ol'
return message
| [
"[email protected]"
] | |
d1297225e16ebf5161aebae8590fb1060bd9310e | e6f16fbba8fba750099252c3490f00079cb19101 | /算法/053_最大子序求和.py | e493a34fa89f268394e4fe3b768062a9898cacd9 | [] | no_license | hookeyplayer/exercise.io | 0a36fbec9df6c24b60ff6f97de27d3d5ae7769d4 | 605c81cb44443efd974db9fa0a088ddcd5a96f0f | refs/heads/master | 2023-06-20T17:03:20.310816 | 2021-07-31T12:50:21 | 2021-07-31T12:50:21 | 277,175,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | from typing import List
class Solution:
# DP1
# 贪心
def maxSubArray(self, nums: List[int]) -> int:
thisSum = 0
maxSum = -2**31
for i in range(len(nums)):
if thisSum < 0:
thisSum = 0
thisSum += nums[i]
maxSum = max(maxSum, thisSum)
return maxSum
# DP2
def maxSubArray(self, nums: List[int]) -> int:
        # dp[i] is the best sum of a subarray that ends at element i:
        # if the previous run dp[i-1] is a positive gain, extend it with nums[i];
        # otherwise drop it and start a new subarray at nums[i]
dp = [0] * len(nums)
dp[0] = nums[0]
ans = nums[0]
for i in range(1, len(nums)):
dp[i] = max(dp[i-1], 0) + nums[i]
ans = max(ans, dp[i])
return ans
# DP3
def maxSubArray(self, nums: List[int]) -> int:
maxEndingHere = ans = nums[0]
for i in range(1, len(nums)):
maxEndingHere = max(maxEndingHere+nums[i], nums[i])
ans = max(maxEndingHere, ans)
return ans
if __name__ == '__main__':
test = Solution()
print(test.maxSubArray([-2, 3, -1, -8]))
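    # expected output: 3 (the best subarray is [3])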
| [
"[email protected]"
] | |
a61936e0b3152ba6a39cdb301e33356c628ccc6c | a14ec6e367e6a471bfc74c066fb958ef585bc269 | /2022/22/b.py | 1b47f0a8bee5acc2c3a1acc74fd052c972b656f5 | [] | no_license | jimhendy/AoC | 90641814ed431f46a8500ff0f022c6c957567563 | a1727f88bc2e6f739d65902dce188377966b3fb4 | refs/heads/master | 2023-09-02T14:48:39.860352 | 2023-08-28T08:09:19 | 2023-08-28T08:09:19 | 225,152,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,593 | py | import re
from collections import defaultdict, deque
from collections.abc import Callable
from tools.point import Point2D
DIRECTIONS = deque(["right", "down", "left", "up"])
FACING_VALUE = {"right": 0, "left": 2, "up": 3, "down": 1}
OPPOSITES = {"down": "up", "up": "down", "left": "right", "right": "left"}
def run(inputs: str): # sourcery skip: collection-into-set
inputs = inputs.splitlines()
board = inputs[:-2]
open_tiles = set()
walls = set()
loop_tiles: dict[str, dict[Point2D, Point2D]] = defaultdict(dict)
loop_directions: dict[str, dict[Point2D, Point2D]] = defaultdict(dict)
for y, row in enumerate(board):
for x, char in enumerate(row):
if char == " ":
continue
if char == "#":
walls.add(Point2D(x, y))
elif char == ".":
open_tiles.add(Point2D(x, y))
side_length = max(len(board) // 4, max(len(line) for line in board) // 4)
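    # side_length tells the 4-wide example net apart from the 50-wide puzzle
    # net; the hard-coded edge pairings passed to configure_moves below differ
    # for the two layouts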
def extract_from_x(
x_lambda: Callable[[int], int],
min_or_max: Callable,
) -> Callable[[int], Point2D]:
def func(i: int) -> Point2D:
x = x_lambda(i)
return Point2D(x=x, y=min_or_max(p.y for p in open_tiles if p.x == x))
return func
def extract_from_y(
y_lambda: Callable[[int], int],
min_or_max: Callable,
) -> Callable[[int], Point2D]:
def func(i: int) -> Point2D:
y = y_lambda(i)
return Point2D(
x=min_or_max(p.x for p in open_tiles if p.y == y),
y=y,
)
return func
def configure_moves(
leaving_lambda: Callable[[int], Point2D],
entering_lambda: Callable[[int], Point2D],
leaving_direction: str,
entering_direction: str,
):
"""
Add the loop data for a particular side exit.
E.g. for the unfolded cube:
11
11
223344
223344
5566
5566
Exiting the right of side 1 would enter from the right of side 6.
Hence, this function is used with
* leaving_direction="right"
* entering_direction="left"
With functions taking in an iterable of side length (2 in example) to create the Point2Ds
e.g.
leaving_lambda=lambda i: i
entering_lambda=lambda i: side_length * 3 - 1 - i
"""
if leaving_direction in {"up", "down"}:
leaving_extract = extract_from_x
leaving_min_max = min if leaving_direction == "up" else max
else:
leaving_extract = extract_from_y
leaving_min_max = min if leaving_direction == "left" else max
if entering_direction in {"up", "down"}:
entering_extract = extract_from_x
entering_min_max = min if entering_direction == "down" else max
else:
entering_extract = extract_from_y
entering_min_max = min if entering_direction == "right" else max
for i in range(side_length):
leaving = leaving_extract(leaving_lambda, leaving_min_max)(i)
entering = entering_extract(entering_lambda, entering_min_max)(i)
beyond_leaving = leaving + Point2D.steps[leaving_direction]
beyond_entering = entering + Point2D.steps[OPPOSITES[entering_direction]]
can_leave = beyond_leaving not in walls
can_enter = beyond_entering not in walls
if can_leave and can_enter:
loop_tiles[leaving_direction][beyond_leaving] = entering
loop_directions[leaving_direction][beyond_leaving] = entering_direction
loop_tiles[OPPOSITES[entering_direction]][beyond_entering] = leaving
loop_directions[OPPOSITES[entering_direction]][
beyond_entering
] = OPPOSITES[leaving_direction]
if side_length == 4:
# Right of 1, right of 6
configure_moves(
leaving_direction="right",
leaving_lambda=lambda i: i,
entering_direction="left",
entering_lambda=lambda i: 3 * side_length - 1 - i,
)
# Right of 4, top of 6
configure_moves(
leaving_direction="right",
leaving_lambda=lambda i: side_length + i,
entering_direction="down",
entering_lambda=lambda i: 4 * side_length - 1 - i,
)
# Bottom of 6, left of 2
configure_moves(
leaving_direction="down",
leaving_lambda=lambda i: 3 * side_length + i,
entering_direction="right",
entering_lambda=lambda i: 2 * side_length - 1 - i,
)
# Bottom of 5, bottom of 2
configure_moves(
leaving_direction="down",
leaving_lambda=lambda i: 2 * side_length + i,
entering_direction="up",
entering_lambda=lambda i: side_length - 1 - i,
)
# Top of 1, top of 2
configure_moves(
leaving_direction="up",
leaving_lambda=lambda i: 2 * side_length + i,
entering_direction="down",
entering_lambda=lambda i: side_length - 1 - i,
)
# Left of 1, top of 3
configure_moves(
leaving_direction="left",
leaving_lambda=lambda i: i,
entering_direction="down",
entering_lambda=lambda i: side_length + i,
)
# Bottom of 3, left of 5
configure_moves(
leaving_direction="down",
leaving_lambda=lambda i: side_length + i,
entering_direction="right",
entering_lambda=lambda i: 3 * side_length - 1 - i,
)
else: # =====================================================================================
# Right side 2, right side 4
configure_moves(
leaving_direction="right",
leaving_lambda=lambda i: i,
entering_direction="left",
entering_lambda=lambda i: 3 * side_length - 1 - i,
)
# Right side 3, bottom side 2
configure_moves(
leaving_direction="right",
leaving_lambda=lambda i: side_length + i,
entering_direction="up",
entering_lambda=lambda i: 2 * side_length + i,
)
# Right side 6, bottom side 4
configure_moves(
leaving_direction="right",
leaving_lambda=lambda i: 3 * side_length + i,
entering_direction="up",
entering_lambda=lambda i: side_length + i,
)
# Top side 2, bottom side 6
configure_moves(
leaving_direction="up",
leaving_lambda=lambda i: 2 * side_length + i,
entering_direction="up",
entering_lambda=lambda i: i,
)
# Top side 1, left side 6
configure_moves(
leaving_direction="up",
leaving_lambda=lambda i: side_length + i,
entering_direction="right",
entering_lambda=lambda i: 3 * side_length + i,
)
# Left side 1, left side 5
configure_moves(
leaving_direction="left",
leaving_lambda=lambda i: i,
entering_direction="right",
entering_lambda=lambda i: 3 * side_length - 1 - i,
)
# Left side 3, top side 5
configure_moves(
leaving_direction="left",
leaving_lambda=lambda i: side_length + i,
entering_direction="down",
entering_lambda=lambda i: i,
)
moves = map(int, re.findall(r"\d+", inputs[-1]))
turns = iter(re.findall(r"[RL]", inputs[-1]))
loc = Point2D(min(p.x for p in open_tiles if p.y == 0), 0)
step = Point2D.steps["right"]
for num_steps in moves:
for _ in range(num_steps):
new_loc = loc + step
if new_loc in open_tiles:
loc = new_loc
elif new_loc in loop_tiles[DIRECTIONS[0]]:
loc = loop_tiles[DIRECTIONS[0]][new_loc]
new_direction = loop_directions[DIRECTIONS[0]][new_loc]
while DIRECTIONS[0] != new_direction:
DIRECTIONS.rotate()
step = Point2D.steps[new_direction]
try:
rot = -1 if next(turns) == "R" else 1
DIRECTIONS.rotate(rot)
step = Point2D.steps[DIRECTIONS[0]]
except StopIteration:
...
return 1_000 * (abs(loc.y) + 1) + 4 * (loc.x + 1) + FACING_VALUE[DIRECTIONS[0]]
| [
"[email protected]"
] | |
9ea0d53abc6fe76ea844647c939081ba7bef497d | 986a8c5de450fc436897de9aaff4c5f737074ee3 | /笔试题/2019 PayPal实习生招聘编程卷/2_寻找关联用户.py | 9f80fb24348a2f07ea5df09bb989cd129384753d | [] | no_license | lovehhf/newcoder_py | 7a0ef03f0ea733ec925a10f06566040f6edafa67 | f8ae73deef1d9422ca7b0aa9f484dc96db58078c | refs/heads/master | 2020-04-27T18:20:19.082458 | 2019-05-24T15:30:13 | 2019-05-24T15:30:13 | 174,564,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,697 | py | # -*- coding:utf-8 -*-
__author__ = 'huanghf'
"""
PayPal上海团队一直致力于风险控制,风控需要收集各种信息,有时需要通过地理位置找出用户与用户之间存在的关联关系,
这一信息可能会用于找出用户潜在存在的风险问题。我们记两个用户的关联关系可以表示为:
(1). user1,user2与他们最常发生交易的地理位置分别为(x1, y1),(x2, y2),当这两个用户的欧氏距离不超过d时,我们就认为两个用户关联。
(2). **用户关联性具有传递性**,若用户1与用户2关联,用户2与用户3关联,那么用户1,2,3均关联。
给定N个用户及其地理位置坐标,将用户按照关联性进行划分,要求返回一个集合,集合中每个元素是属于同一个范围的用户群。
输入描述:
d:欧式距离
N:用户数
之后的N行表示第0个用户到第N-1个用户的地理位置坐标
输出描述:
一个数组集合,所有关联的用户在一个数组中。
输出数组需要按照从小到大的顺序排序,每个集合内的数组也需要按照从小到大的顺序排序。
输入例子1:
2.0
5
3.0 5.0
6.0 13.0
2.0 6.0
7.0 12.0
0.0 2.0
输出例子1:
[[0, 2], [1, 3], [4]]
欧氏距离(Euclid Distance)也称欧几里得度量、欧几里得距离,
是一个通常采用的距离定义,它是在m维空间中两个点之间的真实距离.在二维空间中的欧氏距离就是两点之间的直线段距离.
"""
def solve(d, n, grid):
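    # r is an adjacency list: users within Euclidean distance d of each other
    # are linked; s keeps users that never match anyone, so they come out as
    # singleton groups at the end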
r = [[] for _ in range(n)]
s = {x for x in range(n)}
for i in range(n - 1):
x1, y1 = grid[i][0], grid[i][1]
for j in range(i + 1, n):
x2, y2 = grid[j][0], grid[j][1]
            td = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5  # Euclidean distance
if td - d <= 1e-6:
r[i].append(j)
r[j].append(i)
s -= {i, j}
# print(s)
# print(r)
# for i in range(n):
# print(i, r[i])
# print(grid[5],grid[0])
res = []
visited = set()
for i in range(n):
if not r[i] or (i in visited):
continue
queue = [i] + r[i]
t = set()
# print(queue,visited)
visited.add(i)
while queue:
cur = queue.pop(0)
t.add(cur)
            if r[cur] and cur not in visited:
visited.add(cur)
queue.extend(r[cur])
res.append(sorted(t))
res += [[x] for x in s]
res = sorted(res)
print(res)
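# Note: the transitivity requirement makes this a connected-components problem;
# a union-find (disjoint-set) structure is the usual alternative to the BFS in
# solve() above. A minimal sketch, not part of the original solution:
#
#   parent = list(range(n))
#   def find(i):
#       while parent[i] != i:
#           parent[i] = parent[parent[i]]  # path compression
#           i = parent[i]
#       return i
#   def union(i, j):
#       parent[find(i)] = find(j)
#
# After union-ing every pair within distance d, users sharing a find() root
# form one group.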
def test():
d = 12.6
n = 100
grid = [[11.12, 99.45], [87.04, 54.52], [8.49, 56.22], [44.36, 63.42], [51.64, 16.18], [78.87, 44.55],
[68.98, 8.36], [38.5, 60.72], [8.9, 18.52], [66.65, 69.37], [16.94, 89.92], [2.91, 95.37], [88.73, 67.56],
[48.36, 10.49], [77.66, 81.33], [19.84, 23.79], [93.73, 53.5], [70.4, 84.37], [2.96, 45.82], [71.81, 86.02],
[25.85, 32.62], [38.68, 41.81], [37.4, 97.38], [23.97, 66.08], [91.64, 2.62], [30.52, 60.87],
[28.77, 75.35], [64.51, 54.66], [74.76, 83.85], [3.17, 82.11], [9.85, 55.38], [61.39, 25.37],
[64.98, 98.91], [19.56, 59.94], [3.41, 52.19], [18.59, 60.89], [58.39, 68.43], [37.04, 28.99],
[27.38, 93.92], [40.0, 90.5], [68.04, 65.41], [29.76, 9.24], [2.14, 73.34], [34.22, 25.04], [9.62, 95.71],
[40.49, 98.77], [86.92, 5.87], [18.53, 0.91], [51.52, 97.44], [82.42, 28.02], [97.19, 49.6], [5.07, 69.3],
[43.87, 76.62], [36.86, 74.31], [80.08, 67.32], [77.6, 91.9], [65.82, 21.78], [17.34, 60.42],
[49.76, 35.59], [91.91, 29.64], [55.63, 65.11], [95.93, 86.18], [9.62, 31.09], [58.24, 35.57],
[13.12, 82.71], [12.43, 66.84], [13.45, 49.13], [88.7, 88.32], [88.66, 51.46], [12.21, 76.92],
[42.25, 34.59], [18.11, 27.49], [98.4, 5.19], [83.28, 23.66], [98.97, 30.16], [31.31, 77.16],
[30.12, 57.63], [88.39, 98.54], [47.77, 72.19], [55.23, 69.07], [29.1, 84.83], [12.87, 81.05],
[50.15, 89.02], [98.83, 1.52], [78.53, 84.48], [73.81, 93.22], [74.51, 82.02], [49.29, 95.63],
[13.84, 54.87], [61.39, 29.04], [81.36, 94.3], [57.4, 34.96], [35.52, 60.97], [8.04, 7.26], [11.1, 97.25],
[70.59, 0.15], [9.76, 80.16], [6.05, 21.77], [26.16, 50.0], [96.66, 34.61]]
solve(d, n, grid)
def main():
grid = []
d = float(input())
n = int(input())
for _ in range(n):
grid.append(list(map(float, input().split())))
solve(d, n, grid)
def test2():
x1, y1 = [78.87, 44.55]
for x2, y2 in [[87.04, 54.52], [93.73, 53.5], [97.19, 49.6], [88.66, 51.46]]:
print(((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5)
main()
| [
"[email protected]"
] | |
285805fef3a2ff9e680c5ab423f7d70e47b906fb | 71962596a0693e03e19257f1beb3bdda223ed4ff | /profile_xf05id1/startup/81-saturn.py | f8188b0e760c0e83d75202caca233c3c59ee06b7 | [
"BSD-2-Clause"
] | permissive | tacaswell/ipython_srx | 53561979f27a108063f4851ea314073768098cbb | e3dbb45cfd87c166878e8420654cc7995f772eda | refs/heads/master | 2020-12-25T00:19:11.936763 | 2016-02-18T00:30:51 | 2016-02-18T00:30:51 | 51,659,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,831 | py | from ophyd.mca import (EpicsMCA, EpicsDXP)
from ophyd import (Component as Cpt, Device, EpicsSignal, EpicsSignalRO,
EpicsSignalWithRBV, DeviceStatus)
from ophyd.device import (BlueskyInterface, Staged)
class SaturnMCA(EpicsMCA):
# TODO: fix upstream
preset_real_time = Cpt(EpicsSignal, '.PRTM')
preset_live_time = Cpt(EpicsSignal, '.PLTM')
elapsed_real_time = Cpt(EpicsSignalRO, '.ERTM')
elapsed_live_time = Cpt(EpicsSignalRO, '.ELTM')
check_acquiring = Cpt(EpicsSignal, 'CheckACQG')
client_wait = Cpt(EpicsSignal, 'ClientWait')
collect_data = Cpt(EpicsSignal, 'CollectData')
enable_wait = Cpt(EpicsSignal, 'EnableWait')
erase = Cpt(EpicsSignal, 'Erase')
erase_start = Cpt(EpicsSignal, 'EraseStart')
read = Cpt(EpicsSignal, 'Read')
read_callback = Cpt(EpicsSignal, 'ReadCallback')
read_data_once = Cpt(EpicsSignal, 'ReadDataOnce')
read_status_once = Cpt(EpicsSignal, 'ReadStatusOnce')
set_client_wait = Cpt(EpicsSignal, 'SetClientWait')
start = Cpt(EpicsSignal, 'Start')
status = Cpt(EpicsSignal, 'Status')
stop_signal = Cpt(EpicsSignal, 'Stop')
when_acq_stops = Cpt(EpicsSignal, 'WhenAcqStops')
why1 = Cpt(EpicsSignal, 'Why1')
why2 = Cpt(EpicsSignal, 'Why2')
why3 = Cpt(EpicsSignal, 'Why3')
why4 = Cpt(EpicsSignal, 'Why4')
class SaturnDXP(EpicsDXP):
baseline_energy_array = Cpt(EpicsSignal, 'BaselineEnergyArray')
baseline_histogram = Cpt(EpicsSignal, 'BaselineHistogram')
calibration_energy = Cpt(EpicsSignal, 'CalibrationEnergy_RBV')
current_pixel = Cpt(EpicsSignal, 'CurrentPixel')
dynamic_range = Cpt(EpicsSignal, 'DynamicRange_RBV')
elapsed_live_time = Cpt(EpicsSignal, 'ElapsedLiveTime')
elapsed_real_time = Cpt(EpicsSignal, 'ElapsedRealTime')
elapsed_trigger_live_time = Cpt(EpicsSignal, 'ElapsedTriggerLiveTime')
energy_threshold = Cpt(EpicsSignalWithRBV, 'EnergyThreshold')
gap_time = Cpt(EpicsSignalWithRBV, 'GapTime')
max_width = Cpt(EpicsSignalWithRBV, 'MaxWidth')
mca_bin_width = Cpt(EpicsSignal, 'MCABinWidth_RBV')
num_ll_params = Cpt(EpicsSignal, 'NumLLParams')
peaking_time = Cpt(EpicsSignalWithRBV, 'PeakingTime')
preset_events = Cpt(EpicsSignalWithRBV, 'PresetEvents')
preset_mode = Cpt(EpicsSignalWithRBV, 'PresetMode')
preset_triggers = Cpt(EpicsSignalWithRBV, 'PresetTriggers')
read_ll_params = Cpt(EpicsSignal, 'ReadLLParams')
trace_data = Cpt(EpicsSignal, 'TraceData')
trace_mode = Cpt(EpicsSignalWithRBV, 'TraceMode')
trace_time_array = Cpt(EpicsSignal, 'TraceTimeArray')
trace_time = Cpt(EpicsSignalWithRBV, 'TraceTime')
trigger_gap_time = Cpt(EpicsSignalWithRBV, 'TriggerGapTime')
trigger_peaking_time = Cpt(EpicsSignalWithRBV, 'TriggerPeakingTime')
trigger_threshold = Cpt(EpicsSignalWithRBV, 'TriggerThreshold')
class Saturn(Device):
dxp = Cpt(SaturnDXP, 'dxp1:')
mca = Cpt(SaturnMCA, 'mca1')
channel_advance = Cpt(EpicsSignal, 'ChannelAdvance')
client_wait = Cpt(EpicsSignal, 'ClientWait')
dwell = Cpt(EpicsSignal, 'Dwell')
max_scas = Cpt(EpicsSignal, 'MaxSCAs')
num_scas = Cpt(EpicsSignalWithRBV, 'NumSCAs')
poll_time = Cpt(EpicsSignalWithRBV, 'PollTime')
prescale = Cpt(EpicsSignal, 'Prescale')
save_system = Cpt(EpicsSignalWithRBV, 'SaveSystem')
save_system_file = Cpt(EpicsSignal, 'SaveSystemFile')
set_client_wait = Cpt(EpicsSignal, 'SetClientWait')
class SaturnSoftTrigger(BlueskyInterface):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._status = None
self._acquisition_signal = self.mca.erase_start
self.stage_sigs[self.mca.stop_signal] = 1
self.stage_sigs[self.dxp.preset_mode] = 'Real time'
self._count_signal = self.mca.preset_real_time
self._count_time = None
def stage(self):
if self._count_time is not None:
self.stage_sigs[self._count_signal] = self._count_time
super().stage()
def unstage(self):
try:
super().unstage()
finally:
if self._count_signal in self.stage_sigs:
del self.stage_sigs[self._count_signal]
self._count_time = None
def trigger(self):
"Trigger one acquisition."
if self._staged != Staged.yes:
raise RuntimeError("This detector is not ready to trigger."
"Call the stage() method before triggering.")
self._status = DeviceStatus(self)
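        # completion flow: _acquisition_done marks this status object finished
        # once the EPICS put on erase_start completes, so bluesky can wait on it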
self._acquisition_signal.put(1, callback=self._acquisition_done)
return self._status
def _acquisition_done(self, **kwargs):
'''pyepics callback for when put completion finishes'''
if self._status is not None:
self._status._finished()
self._status = None
@property
def count_time(self):
'''Exposure time, as set by bluesky'''
return self._count_time
@count_time.setter
def count_time(self, count_time):
self._count_time = count_time
class SRXSaturn(SaturnSoftTrigger, Saturn):
def __init__(self, prefix, *, read_attrs=None, configuration_attrs=None,
**kwargs):
if read_attrs is None:
read_attrs = ['mca.spectrum']
if configuration_attrs is None:
configuration_attrs = ['mca.preset_real_time',
'mca.preset_live_time',
'dxp.preset_mode',
]
super().__init__(prefix, read_attrs=read_attrs,
configuration_attrs=configuration_attrs, **kwargs)
if __name__ == '__main__':
from ophyd.commands import setup_ophyd
setup_ophyd()
saturn = SRXSaturn('dxpSaturn:', name='saturn')
| [
"[email protected]"
] | |
8c529c07af84d91fd9c4e3012908fb6f78b74f9e | e5d83ede8521027b05d9b91c43be8cab168610e6 | /0x01-python-if_else_loops_functions/0-positive_or_negative.py | 7db6d2d5aec2226e9657ea987dac24bd7104128f | [] | no_license | Danielo814/holbertonschool-higher_level_programming | 8918c3a6a9c136137761d47c5162b650708dd5cd | 832b692529198bbee44d2733464aedfe650bff7e | refs/heads/master | 2020-03-28T11:09:00.343055 | 2019-02-22T03:33:54 | 2019-02-22T03:33:54 | 148,181,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | #!/usr/bin/python3
import random
number = random.randint(-10, 10)
if number < 0:
print("{} is negative".format(number))
if number == 0:
print("{} is zero".format(number))
if number > 0:
print("{} is positive".format(number))
| [
"[email protected]"
] | |
30ee5b82371e74006e13d114f8f1da5e37e2137d | 956cc6ff2b58a69292f7d1223461bc9c2b9ea6f1 | /monk/tf_keras_1/transforms/common.py | 9253e05c5cce0e7d2d18e4deaa3832cf3c094726 | [
"Apache-2.0"
] | permissive | Aanisha/monk_v1 | c24279b2b461df9b3de2984bae0e2583aba48143 | c9e89b2bc0c1dbb320aa6da5cba0aa1c1526ad72 | refs/heads/master | 2022-12-29T00:37:15.320129 | 2020-10-18T09:12:13 | 2020-10-18T09:12:13 | 286,278,278 | 0 | 0 | Apache-2.0 | 2020-08-09T16:51:02 | 2020-08-09T16:51:02 | null | UTF-8 | Python | false | false | 3,810 | py | from monk.tf_keras_1.transforms.imports import *
from monk.system.imports import *
from monk.tf_keras_1.transforms.transforms import transform_color_jitter
from monk.tf_keras_1.transforms.transforms import transform_random_affine
from monk.tf_keras_1.transforms.transforms import transform_random_horizontal_flip
from monk.tf_keras_1.transforms.transforms import transform_random_rotation
from monk.tf_keras_1.transforms.transforms import transform_random_vertical_flip
from monk.tf_keras_1.transforms.transforms import transform_mean_subtraction
from monk.tf_keras_1.transforms.transforms import transform_normalize
@accepts(dict, list, post_trace=False)
#@TraceFunction(trace_args=False, trace_rv=False)
def set_transforms(system_dict, set_phases):
'''
Set transforms depending on the training, validation and testing phases.
Args:
system_dict (dict): System dictionary storing experiment state and set variables
set_phases (list): Phases in which to apply the transforms.
Returns:
dict: updated system dict
'''
transforms_test = [];
transforms_train = [];
transforms_val = [];
transformations = system_dict["dataset"]["transforms"];
normalize = False;
for phase in set_phases:
tsf = transformations[phase];
if(phase=="train"):
train_status = True;
val_status = False;
test_status = False;
elif(phase=="val"):
train_status = False;
val_status = True;
test_status = False;
else:
train_status = False;
val_status = False;
test_status = True;
for i in range(len(tsf)):
name = list(tsf[i].keys())[0]
input_dict = tsf[i][name];
train = train_status;
val = val_status;
test = test_status;
if(name == "ColorJitter"):
system_dict = transform_color_jitter(
system_dict,
input_dict["brightness"], input_dict["contrast"], input_dict["saturation"], input_dict["hue"],
train, val, test, retrieve=True
);
elif(name == "RandomAffine"):
system_dict = transform_random_affine(
system_dict,
input_dict["degrees"], input_dict["translate"], input_dict["scale"], input_dict["shear"],
train, val, test, retrieve=True
);
elif(name == "RandomHorizontalFlip"):
system_dict = transform_random_horizontal_flip(
system_dict,
input_dict["p"],
train, val, test, retrieve=True
);
elif(name == "RandomVerticalFlip"):
system_dict = transform_random_vertical_flip(
system_dict,
input_dict["p"],
train, val, test, retrieve=True
);
elif(name == "RandomRotation"):
system_dict = transform_random_rotation(
system_dict,
input_dict["degrees"],
train, val, test, retrieve=True
);
elif(name == "MeanSubtraction"):
system_dict = transform_mean_subtraction(
system_dict,
input_dict["mean"],
train, val, test, retrieve=True
);
elif(name == "Normalize"):
system_dict = transform_normalize(
system_dict,
input_dict["mean"], input_dict["std"],
train, val, test, retrieve=True
);
return system_dict;
| [
"[email protected]"
] | |
9498db4276799b71082911df61014f50e9e00ed4 | c879972850bdef6f9c05ec57c964125e4d5d8dfa | /lino/management/commands/qtclient.py | d7596bcdee3da87a45dd2c84f87e0999c0367cd2 | [
"BSD-2-Clause"
] | permissive | forexblog/lino | 845c17f22c6f58fbf0247b084ceacb5e89fba2ef | 68cbd5dd985737b63091b232b9c788a3a9875eef | refs/heads/master | 2023-02-16T02:33:08.387853 | 2021-01-15T09:39:58 | 2021-01-15T09:39:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,738 | py | # -*- coding: UTF-8 -*-
# Copyright 2017-2021 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from django.core.management.base import BaseCommand
from django.conf import settings
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton,
QMessageBox, QDesktopWidget, QMainWindow,
QAction, qApp, QTextEdit, QHBoxLayout,
QVBoxLayout)
# from PyQt5.QtCore import QCoreApplication
from PyQt5.QtGui import QIcon
from lino.api import rt
from lino.core.menus import Menu # , MenuItem
from unipath import Path
images_path = Path(settings.STATIC_ROOT, Path('static/images/mjames'))
class ItemCaller(object):
def __init__(self, win, mi):
self.mi = mi
self.win = win
def __call__(self, event):
if False:
QMessageBox.question(
self.win, str(self.mi.label),
str(self.mi.help_text),
QMessageBox.Yes |
QMessageBox.No, QMessageBox.Yes)
self.frm = DetailForm(self.win, self.mi)
self.frm.show()
class DetailForm(QWidget):
def __init__(self, win, mi):
self.mi = mi
super().__init__(win)
self.setWindowTitle(str(self.mi.label))
self.initUI()
def initUI(self):
okButton = QPushButton("OK")
cancelButton = QPushButton("Cancel")
hbox = QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(okButton)
hbox.addWidget(cancelButton)
vbox = QVBoxLayout()
vbox.addStretch(1)
vbox.addLayout(hbox)
self.setLayout(vbox)
self.setGeometry(300, 300, 300, 150)
# self.show()
class LinoClient(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
textEdit = QTextEdit()
self.setCentralWidget(textEdit)
self.setGeometry(300, 300, 300, 220)
self.center()
self.setWindowTitle('qtclient.py')
self.setWindowIcon(QIcon('../../.static/logo.png'))
self.setToolTip('This is a <b>QWidget</b> widget')
self.menubar = self.menuBar()
user_type = rt.models.users.UserTypes.get_by_value('900')
menu = settings.SITE.get_site_menu(user_type)
self.load_menu(menu, self.menubar)
self.show()
self.statusBar().showMessage('Ready')
def load_menu(self, menu, menubar):
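        # Recursively mirror the Lino site menu: Menu instances become Qt
        # submenus and leaf items become QActions wired to an ItemCaller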
for mi in menu.items:
if isinstance(mi, Menu):
submenu = menubar.addMenu(str(mi.label))
self.load_menu(mi, submenu)
else:
a = QAction(QIcon(images_path.child('cancel.png')),
str(mi.label), self)
if mi.hotkey:
a.setShortcut(mi.hotkey)
a.setStatusTip(str(mi.help_text))
a.triggered.connect(ItemCaller(self, mi))
menubar.addAction(a)
# fileMenu = menubar.addMenu('&File')
exitAction = QAction(QIcon('exit.png'), '&Exit', self)
# exitAction.setShortcut('Ctrl+Q')
# exitAction.setStatusTip('Exit application')
# exitAction.triggered.connect(qApp.quit)
# fileMenu.addAction(exitAction)
# a = QAction(QIcon('detail.png'), '&Detail', self)
# a.triggered.connect(self.show_detail)
# fileMenu.addAction(a)
# self.toolbar = self.addToolBar('Exit')
# self.toolbar.addAction(exitAction)
# btn = QPushButton('Quit', self)
# btn.clicked.connect(QCoreApplication.instance().quit)
# btn.setToolTip('This is a <b>QPushButton</b> widget')
# btn.resize(btn.sizeHint())
# btn.move(50, 50)
# def show_detail(self, event):
# self.detail_form = DetailForm()
# self.detail_form.show()
def closeEvent(self, event):
if True:
event.accept()
return
reply = QMessageBox.question(self, 'MessageBox',
"This will close the window! Are you sure?",
QMessageBox.Yes |
QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
class Command(BaseCommand):
help = """Run a Qt client for this site."""
def handle(self, *args, **options):
app = QApplication(sys.argv)
self.ex = LinoClient()
# sys.exit(app.exec_())
return app.exec_()
| [
"[email protected]"
] | |
84e6607c8844f6a04b1d98959b10d09db2dde1c1 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/pc/rtfcaccbndlgrptofcaggrif.py | 0325546ef5eaa31952748a03f1378eacc3fd4e64 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,000 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtFcAccBndlGrpToFcAggrIf(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = TargetRelationMeta("cobra.model.pc.RtFcAccBndlGrpToFcAggrIf", "cobra.model.infra.AFcAccBndlGrp")
meta.moClassName = "pcRtFcAccBndlGrpToFcAggrIf"
meta.rnFormat = "rtfcAccBndlGrpToFcAggrIf"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Abstraction of Leaf FC Access Policy Group for port-channel"
meta.writeAccessMask = 0x100000000001
meta.readAccessMask = 0x100000020001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.pc.FcAggrIf")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.fv.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.rnPrefixes = [
('rtfcAccBndlGrpToFcAggrIf', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "parentSKey", "parentSKey", 39216, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("parentSKey", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 39137, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 11867
prop.defaultValueStr = "infraAFcAccBndlGrp"
prop._addConstant("infraAFcAccBndlGrp", None, 11867)
prop._addConstant("infraFcAccBndlGrp", None, 11873)
prop._addConstant("infraFcAccBndlPolGrp", None, 11874)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("l1FcIfToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
20b549f692809f3c36be35369593b5decb3c1ef4 | 7b1a5db0a067766a9805fe04105f6c7f9ff131f3 | /pysal/model/mgwr/tests/test_sel_bw.py | 856b65aa3b40931bfe9b658f698db76a492733df | [] | permissive | ocefpaf/pysal | 2d25b9f3a8bd87a7be3f96b825995a185624e1d0 | 7e397bdb4c22d4e2442b4ee88bcd691d2421651d | refs/heads/master | 2020-06-26T17:13:06.016203 | 2019-07-31T19:54:35 | 2019-07-31T19:54:35 | 199,696,188 | 0 | 0 | BSD-3-Clause | 2019-07-30T17:17:19 | 2019-07-30T17:17:18 | null | UTF-8 | Python | false | false | 9,038 | py | """
GWR is tested against results from GWR4
"""
import os
import numpy as np
from pysal.lib import io
import pysal.lib as ps
import unittest
from pysal.model.spglm.family import Gaussian, Poisson, Binomial
from ..sel_bw import Sel_BW
from numpy.testing import assert_allclose
class TestSelBWGaussian(unittest.TestCase):
def setUp(self):
data_path = ps.examples.get_path("GData_utm.csv")
data = io.open(data_path)
self.coords = list(zip(data.by_col('X'), data.by_col('Y')))
self.coords_longlat = list(
zip(data.by_col('Longitud'), data.by_col('Latitude')))
self.y = np.array(data.by_col('PctBach')).reshape((-1, 1))
rural = np.array(data.by_col('PctRural')).reshape((-1, 1))
pov = np.array(data.by_col('PctPov')).reshape((-1, 1))
black = np.array(data.by_col('PctBlack')).reshape((-1, 1))
fb = np.array(data.by_col('PctFB')).reshape((-1, 1))
self.X = np.hstack([rural, pov, black])
self.mgwr_X = np.hstack([fb, black, rural])
def test_golden_fixed_AICc(self):
bw1 = 211020.83
bw2 = Sel_BW(self.coords, self.y, self.X, kernel='bisquare',
fixed=True).search(criterion='AICc')
assert_allclose(bw1, bw2)
scipy_known = 211025.26298
scipy = Sel_BW(self.coords, self.y, self.X,
kernel='bisquare', fixed=True).search(
criterion='AICc', search_method='scipy')
assert_allclose(scipy_known, scipy, atol=1)
def test_golden_adapt_AICc(self):
bw1 = 93.0
bw2 = Sel_BW(self.coords, self.y, self.X, kernel='bisquare',
fixed=False).search(criterion='AICc')
assert_allclose(bw1, bw2)
def test_golden_adapt_AICc_Longlat(self):
bw1 = 92.0
bw2 = Sel_BW(self.coords_longlat, self.y, self.X, kernel='bisquare',
fixed=False, spherical=True).search(criterion='AICc')
assert_allclose(bw1, bw2)
def test_golden_fixed_AIC(self):
bw1 = 76201.66
bw2 = Sel_BW(self.coords, self.y, self.X, kernel='gaussian',
fixed=True).search(criterion='AIC')
assert_allclose(bw1, bw2)
scipy_known = 76199.81
scipy = Sel_BW(self.coords, self.y, self.X,
kernel='gaussian', fixed=True).search(
criterion='AIC', search_method='scipy')
assert_allclose(scipy_known, scipy, atol=1)
def test_golden_adapt_AIC(self):
bw1 = 50.0
bw2 = Sel_BW(self.coords, self.y, self.X, kernel='gaussian',
fixed=False).search(criterion='AIC')
assert_allclose(bw1, bw2)
def test_golden_fixed_BIC(self):
bw1 = 1117795.47
bw2 = Sel_BW(self.coords, self.y, self.X, kernel='gaussian',
fixed=True).search(criterion='BIC')
assert_allclose(bw1, bw2)
scipy_known = 1117806.16
scipy = Sel_BW(self.coords, self.y, self.X,
kernel='gaussian', fixed=True).search(
criterion='BIC', search_method='scipy')
assert_allclose(scipy_known, scipy, atol=1)
def test_golden_adapt_BIC(self):
bw1 = 62.0
bw2 = Sel_BW(self.coords, self.y, self.X, kernel='gaussian',
fixed=False).search(criterion='BIC')
assert_allclose(bw1, bw2)
def test_golden_fixed_CV(self):
bw1 = 130289.26
bw2 = Sel_BW(self.coords, self.y, self.X, kernel='gaussian',
fixed=True).search(criterion='CV')
assert_allclose(bw1, bw2)
scipy_known = 130363.55
scipy = Sel_BW(self.coords, self.y, self.X,
kernel='gaussian', fixed=True).search(
criterion='CV', search_method='scipy')
assert_allclose(scipy_known, scipy, atol=1)
def test_golden_adapt_CV(self):
bw1 = 68.0
bw2 = Sel_BW(self.coords, self.y, self.X, kernel='gaussian',
fixed=False).search(criterion='CV')
assert_allclose(bw1, bw2)
def test_interval_fixed_AICc(self):
bw1 = 211025.0
bw2 = Sel_BW(self.coords, self.y, self.X,
kernel='bisquare', fixed=True).search(
criterion='AICc', search_method='interval',
bw_min=211001.0, bw_max=211035.0, interval=2)
assert_allclose(bw1, bw2)
def test_interval_adapt_AICc(self):
bw1 = 93.0
bw2 = Sel_BW(self.coords, self.y, self.X,
kernel='bisquare', fixed=False).search(
criterion='AICc', search_method='interval',
bw_min=90.0, bw_max=95.0, interval=1)
assert_allclose(bw1, bw2)
def test_interval_fixed_AIC(self):
bw1 = 76175.0 #76169.00
bw2 = Sel_BW(self.coords, self.y, self.X,
kernel='gaussian', fixed=True).search(
criterion='AIC', search_method='interval',
bw_min=76161.0, bw_max=76175.0, interval=1)
assert_allclose(bw1, bw2)
def test_interval_adapt_AIC(self):
bw1 = 40.0 #50.0
bw2 = Sel_BW(self.coords, self.y, self.X,
kernel='gaussian', fixed=False).search(
criterion='AIC', search_method='interval',
bw_min=40.0, bw_max=60.0, interval=2)
assert_allclose(bw1, bw2)
def test_interval_fixed_BIC(self):
bw1 = 279461.0 #279451.00
bw2 = Sel_BW(self.coords, self.y, self.X,
kernel='gaussian', fixed=True).search(
criterion='BIC', search_method='interval',
bw_min=279441.0, bw_max=279461.0, interval=2)
assert_allclose(bw1, bw2)
def test_interval_adapt_BIC(self):
bw1 = 62.0
bw2 = Sel_BW(self.coords, self.y, self.X,
kernel='gaussian', fixed=False).search(
criterion='BIC', search_method='interval',
bw_min=52.0, bw_max=72.0, interval=2)
assert_allclose(bw1, bw2)
def test_interval_fixed_CV(self):
bw1 = 130400.0 #130406.00
bw2 = Sel_BW(self.coords, self.y, self.X,
kernel='gaussian', fixed=True).search(
criterion='CV', search_method='interval',
bw_min=130400.0, bw_max=130410.0, interval=1)
assert_allclose(bw1, bw2)
def test_interval_adapt_CV(self):
bw1 = 62.0 #68.0
bw2 = Sel_BW(self.coords, self.y, self.X,
kernel='gaussian', fixed=False).search(
criterion='CV', search_method='interval', bw_min=60.0,
bw_max=76.0, interval=2)
assert_allclose(bw1, bw2)
def test_MGWR_AICc(self):
bw1 = [101.0, 101.0, 117.0, 157.0]
std_y = (self.y - self.y.mean()) / self.y.std()
std_X = (self.mgwr_X - self.mgwr_X.mean(axis=0)) / self.mgwr_X.std(
axis=0)
selector = Sel_BW(self.coords, std_y, std_X, multi=True, constant=True)
bw2 = selector.search()
np.testing.assert_allclose(bw1, bw2)
def test_MGWR_AICc_Longlat(self):
bw1 = [104.0, 104.0, 103.0, 157.0]
std_y = (self.y - self.y.mean()) / self.y.std()
std_X = (self.mgwr_X - self.mgwr_X.mean(axis=0)) / self.mgwr_X.std(
axis=0)
selector = Sel_BW(self.coords_longlat, std_y, std_X, multi=True,
constant=True, spherical=True)
bw2 = selector.search()
assert_allclose(bw1, bw2)
class TestGWRSelBWPoisson(unittest.TestCase):
def setUp(self):
data_path = os.path.join(
os.path.dirname(__file__), 'tokyo/Tokyomortality.csv')
data = io.open(data_path, mode='Ur')
self.coords = list(
zip(data.by_col('X_CENTROID'), data.by_col('Y_CENTROID')))
self.y = np.array(data.by_col('db2564')).reshape((-1, 1))
self.off = np.array(data.by_col('eb2564')).reshape((-1, 1))
OCC = np.array(data.by_col('OCC_TEC')).reshape((-1, 1))
OWN = np.array(data.by_col('OWNH')).reshape((-1, 1))
POP = np.array(data.by_col('POP65')).reshape((-1, 1))
UNEMP = np.array(data.by_col('UNEMP')).reshape((-1, 1))
self.X = np.hstack([OCC, OWN, POP, UNEMP])
def test_golden_adapt_AICc_Poisson_w_offset(self):
bw1 = 95.0
bw2 = Sel_BW(self.coords, self.y, self.X, kernel='bisquare',
family=Poisson(), fixed=False,
offset=self.off).search(criterion='AICc')
assert_allclose(bw1, bw2)
def test_golden_adapt_AICc_Poisson_wo_offset(self):
bw1 = 51.0
bw2 = Sel_BW(self.coords, self.y, self.X, kernel='bisquare',
family=Poisson(), fixed=False).search(criterion='AICc')
assert_allclose(bw1, bw2)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
723d1ab3fba8472207d08566888754ff280a6ce1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02622/s420656500.py | ba3844bb33b39a0bd750b8488ba05864255e60b4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | s = [x for x in input()]
t = [y for y in input()]
count = 0
for _ in range(len(s)):
if s[_] != t[_]:
count+=1
print(count)
| [
"[email protected]"
] | |
ea8181be3486f3495b71aed3582a1cd0ce049fb5 | 412d0f01ab87c6b1e96f5f8afc263a3c33188b2f | /plots/fps_bar_chart.py | 1b735fc4f4b009283bc7ea4997f878756bc13cae | [
"MIT"
] | permissive | neevparikh/sample-factory | 7e48a8b4c7d0b534642bd90d28e66677f03b715e | 1b57647a231359ed6794db72bcc39b4ebb01b39e | refs/heads/master | 2023-03-16T05:15:05.945225 | 2021-03-09T02:28:03 | 2021-03-09T02:28:03 | 290,346,928 | 0 | 0 | MIT | 2020-08-25T23:36:01 | 2020-08-25T23:36:00 | null | UTF-8 | Python | false | false | 1,909 | py | import math
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set()
labels = ['20', '40', '80', '160', '320', '640']
# fps_by_method_10_core_cpu = dict(
# deepmind_impala=[8590, 10596, 10941, 10928, 13328, math.nan],
# rllib_appo=[9384, 9676, 11171, 11328, 11590, 11345],
# ours=[11565, 16982, 25068, 37410, 46977, 52033]
# )
# data = fps_by_method_10_core_cpu
fps_by_method_36_core_cpu = dict(
deepmind_impala=[6951, 8191, 8041, 9900, 10014, math.nan],
rllib_appo=[13308, 23608, 30568, 31002, 32840, 33784],
ours=[11586, 20410, 33326, 46243, 70124, 86753],
)
data = fps_by_method_36_core_cpu
# ours: 160=40x4, 320=40x8 with 3072 bs, 640=80x8 with 3072 bs
# multi-policy:
# 2 policies, 640 actors, 93284 FPS
# 4 policies: 1600 actors, 116320 FPS
# 8 policies: 1600 actors,
x = np.arange(len(labels)) # the label locations
width = 0.25 # the width of the bars
fig, ax = plt.subplots(figsize=(12, 8))
item_idx = 0
bars = dict()
for key, value in data.items():
rects = ax.bar(x + item_idx * width - len(data) * width / 2, value, width, label=key)
bars[key] = rects
item_idx += 1
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_xlabel('Num. environments in parallel')
ax.set_ylabel('Environment frames per second')
ax.set_title('Throughput of different RL methods')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords='offset points',
ha='center', va='bottom')
autolabel(bars['ours'])
fig.tight_layout()
plt.show()
| [
"[email protected]"
] | |
cd99c5c4b1cb47999230fedf11bf9d56d4b14c76 | bd5f807161da0a9d3f6f8c5c3b4da073545eaafc | /day_1/print_function.py | 86260c5a4cb8249a92687577759a3a34cc0eb9c8 | [
"MIT"
] | permissive | anishLearnsToCode/python-workshop-7 | a6a497ddb6cf41a2097ac976426710ca4aa41987 | 2d5933be2629600f5f9e8efea58403421737a299 | refs/heads/main | 2023-02-07T21:00:01.308260 | 2020-12-27T08:22:14 | 2020-12-27T08:22:14 | 323,645,763 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | print('i am batman', end='\n')
print('hello world', end='\t\t')
print('this is cool\n', end=' ----- ')
print('i am still here')
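# Expected stdout, given the end= arguments above:
#   i am batman
#   hello world\t\tthis is cool
#    ----- i am still here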
| [
"[email protected]"
] | |
74612d03df0d217ddea8a2be6fec57fb41f85c27 | bb68c958809899a24ec7d250adacc70c30203b04 | /rti_python/Ensemble/NmeaData.py | 5ca12f52eb8356dbba94010a0c6d4f1277627fc5 | [] | no_license | jjt53/ReadPlotCSV_streamlit | d9e432ec83ec930d3e6aa6b8f07bf3329c8081b8 | 3fddf73033e3648bc86ccbf0c5cce2b7868250d9 | refs/heads/main | 2023-01-29T14:05:35.667192 | 2020-12-09T17:17:23 | 2020-12-09T17:17:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,745 | py | import pynmea2
from rti_python.Ensemble.Ensemble import Ensemble
import logging
from pygeodesy import ellipsoidalVincenty
from decimal import *
class NmeaData:
"""
NMEA DataSet.
String data to decode.
"""
def __init__(self, num_elements=0, element_multiplier=1):
self.ds_type = 50 # Bytes
self.num_elements = num_elements
self.element_multiplier = element_multiplier
self.image = 0
self.name_len = 8
self.Name = "E000011\0"
self.nmea_sentences = []
# Initialize with bad values
self.GPGGA = None
self.GPVTG = None
self.GPRMC = None
self.GPGLL = None
self.GPGSV = None
self.GPGSA = None
self.GPHDT = None
self.GPHDG = None
self.latitude = 0.0
self.longitude = 0.0
self.speed_knots = 0.0 # Speed in Knots
self.speed_m_s = 0.0 # Speed in m/s
self.heading = 0.0
self.datetime = None # Date and Time from GGA
def decode(self, data):
"""
Take the data bytearray. Decode the data to populate
the NMEA data.
:param data: Bytearray for the dataset.
"""
packet_pointer = Ensemble.GetBaseDataSize(self.name_len)
nmea_str = str(data[packet_pointer:], "UTF-8")
self.num_elements = len(nmea_str)
for msg in nmea_str.split():
# Add the NMEA message
self.add_nmea(msg)
logging.debug(nmea_str)
logging.debug(self.nmea_sentences)
def add_nmea(self, msg):
try:
# Increment the number of elements
self.num_elements += len(msg)
# Parse the NMEA data
nmea_msg = pynmea2.parse(msg)
if isinstance(nmea_msg, pynmea2.types.talker.GGA):
self.GPGGA = nmea_msg
self.latitude = nmea_msg.latitude
self.longitude = nmea_msg.longitude
self.datetime = nmea_msg.timestamp
if isinstance(nmea_msg, pynmea2.types.talker.VTG):
self.GPVTG = nmea_msg
self.speed_knots = nmea_msg.spd_over_grnd_kts
self.speed_m_s = nmea_msg.spd_over_grnd_kts * Decimal(0.51444444444444)
if isinstance(nmea_msg, pynmea2.types.talker.RMC):
self.GPRMC = nmea_msg
if isinstance(nmea_msg, pynmea2.types.talker.GLL):
self.GPGLL = nmea_msg
if isinstance(nmea_msg, pynmea2.types.talker.GSV):
self.GPGSV = nmea_msg
if isinstance(nmea_msg, pynmea2.types.talker.GSA):
self.GPGSA = nmea_msg
if isinstance(nmea_msg, pynmea2.types.talker.HDT):
self.GPHDT = nmea_msg
self.heading = nmea_msg.heading
if isinstance(nmea_msg, pynmea2.types.talker.HDG):
self.GPHDG = nmea_msg
self.nmea_sentences.append(msg.strip())
except pynmea2.nmea.ParseError as pe:
logging.debug("Bad NMEA String")
except Exception as e:
logging.debug("Error decoding NMEA msg", e)
def encode(self):
"""
Encode the data into RTB format.
:return:
"""
result = []
# Combine all the NMEA strings into one long string
str_nmea = ""
for nmea in self.nmea_sentences:
str_nmea += nmea + "\n"
# Generate the number of elements from the number of characters
self.num_elements = len(str_nmea)
# Generate header
result += Ensemble.generate_header(self.ds_type,
self.num_elements,
self.element_multiplier,
self.image,
self.name_len,
self.Name)
# Convert the strings to bytes
result += str_nmea.encode("UTF-8")
return result
def encode_csv(self, dt, ss_code, ss_config, blank=0, bin_size=0):
"""
Encode into CSV format.
:param dt: Datetime object.
:param ss_code: Subsystem code.
:param ss_config: Subsystem Configuration
:param blank: Blank or first bin position in meters.
:param bin_size: Bin size in meters.
:return: List of CSV lines.
"""
str_result = []
# Create the CSV strings for each NMEA string
for nmea in self.nmea_sentences:
str_result.append(Ensemble.gen_csv_line(dt, Ensemble.CSV_NMEA, ss_code, ss_config, 0, 0, blank, bin_size, nmea))
return str_result
def get_new_position(self, distance: float, bearing: float):
"""
This function is typically used to create a ship track plot with a vector to represent the
water current. The distance will be the magnitude of the water currents, the bearing will be the
direction of the water currents. This will allow you to plot the LatLon and also a vector off this
LatLon point.
:param distance: Distance (magnitude)
:param bearing: Direction to travel
:return The new position based on the input and current position. (lat, lon)
"""
return NmeaData.get_new_lat_lon_position(self.latitude, self.longitude, distance, bearing)
@staticmethod
def get_new_lat_lon_position(latitude: float, longitude: float, distance: float, bearing: float):
"""
This function is typically used to create a ship track plot with a vector to represent the
water current. The distance will be the magnitude of the water currents, the bearing will be the
direction of the water currents. This will allow you to plot the LatLon and also a vector off this
LatLon point.
:param latitude: Start latitude position
:param longitude: Start longitude position
:param distance: Distance (magnitude)
:param bearing: Direction to travel
:return The new position based on the input and current position. (lat, lon)
"""
# Choose a ellipsoid
LatLon = ellipsoidalVincenty.LatLon
# Verify we have a latitude and longitude value
if latitude and longitude:
# Set the current location
curr_loc = LatLon(latitude, longitude)
# Get the new position based on distance and bearing
new_loc = curr_loc.destination(distance=distance, bearing=bearing)
# Return lat, lon
return new_loc.lat, new_loc.lon
return 0.0, 0.0
| [
"[email protected]"
] | |
657b51d419bef4689b2a3a559a1039f993d7a58b | db5684eeac1c7359017a5d109028ce2b8b49d1a7 | /app_rbac/service/AutoFindUrls.py | fbad0d42e31869cebeef14f5a4056828e68f6a7c | [] | no_license | Alan-AW/CrmSys | a4873c52e1f6bb05c45377459b0a040ff7dbbc75 | 95119dd7b96b981a00541e8adcee410eb1fbe865 | refs/heads/main | 2023-08-22T08:04:44.207347 | 2021-10-13T08:08:44 | 2021-10-13T08:08:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,541 | py | from collections import OrderedDict
from django.conf import settings as sys
from django.utils.module_loading import import_string  # built-in helper: import a module from its dotted-path string
from django.urls import URLResolver, URLPattern
import re
"""
自动发现项目中的url方法
"""
class AutoFindUrl:
def check_url_exclude(self, url):
"""
白名单设置;排除一些特定的url的查找
:param url:
:return:
"""
for regex in sys.AUTO_DISCOVER_EXCLUDE:
if re.match(regex, url):
return True
def recursion_urls(self, pre_namespace, pre_url, url_patterns, url_ordered_dict):
"""
:param pre_namespace: namespace的前缀, 以后用于拼接name
:param pre_url: url的前缀, 以后用于拼接url
:param url_patterns: 用于循环的路由, 路由关系列表
:param url_ordered_dict: 用于保存递归中获取的所有路由,有序字典
:return:
"""
for item in url_patterns:
            if isinstance(item, URLPattern):  # plain route, not an include()
if not item.name:
continue
name = item.name if not pre_namespace else "%s:%s" % (pre_namespace, item.name)
url = pre_url + item.pattern.regex.pattern
url = url.replace('^', '').replace('$', '')
if self.check_url_exclude(url):
continue
url_ordered_dict[name] = {'name': name, 'url': url}
            elif isinstance(item, URLResolver):  # included URLconf: recurse into it
if pre_namespace:
namespace = "%s:%s" % (pre_namespace, item.namespace) if item.namespace else item.namespace
else:
namespace = item.namespace if item.namespace else None
self.recursion_urls(namespace, pre_url + item.pattern.regex.pattern, item.url_patterns, url_ordered_dict)
def get_all_url_dict(self):
"""
自动发现项目中的URL(必须有 name 别名)
:return: 所有url的有序字典
"""
url_ordered_dict = OrderedDict() # {'rbac:menu_list': {name:'rbac:menu_list', url: 'xxx/xxx/menu_list'}}
md = import_string(sys.ROOT_URLCONF) # 根据字符串的形式去导入一个模块,在settings中 ROOT_URLCONF 指向的就是项目根路由的文件地址
self.recursion_urls(None, '/', md.urlpatterns, url_ordered_dict) # 递归的获取所有的url
return url_ordered_dict
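# Hypothetical usage sketch (commented out: it needs a configured Django
# settings module that defines ROOT_URLCONF and AUTO_DISCOVER_EXCLUDE):
#   finder = AutoFindUrl()
#   for name, info in finder.get_all_url_dict().items():
#       print(name, info['url'])  # e.g. rbac:menu_list -> /xxx/xxx/menu_list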
| [
"[email protected]"
] | |
9a2222854f5cf34881162cdee222723b8dd7793d | c8c855a6ebb3b3101e5c3a80b94514c36b103495 | /semana_3/Calculadora.py | 677552a81732c0ddcb86782beae94726fe52561a | [] | no_license | K-A-R-L-A-Robles/poo-1719110219 | 835965c0e3100c9d6770678eb67920945942fa80 | 7d1fc57cd4157e5b52a153210311821d8290144d | refs/heads/master | 2022-11-03T04:54:42.675869 | 2020-06-15T03:46:42 | 2020-06-15T03:46:42 | 265,970,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | class calculadora:
"atríbutos"
signos=30
botones =30
baterias= 2
led=1
carcasa=1
#métodos
def encender(self):
print("encender")
def apagar(self):
print("apagar")
def __init__(self):
pass
class casio(calculadora):
    # attributes
    color = "gris"
    longitud = "15cm"
    # methods
def sumar(self):
print("sumar")
def restar(self):
print("restar")
    def __init__(self):
        print("constructor de casio")
casio = casio()  # note: this rebinding shadows the casio class
print("signos= "+str(casio.signos))
print("botones= "+str(casio.botones))
print("baterias= "+str(casio.baterias))
print("led= "+str(casio.led))
("carcasa= "+str(casio.carcasa))
print("color= "+str(casio.color))
print("longitud= "+str(casio.longitud))
casio.encender()
casio.apagar()
casio.sumar()
casio.restar()
| [
"[email protected]"
] | |
010d835db31f6a76e42a04916bd4a4af607762b4 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D08B/COMDISD08BUN.py | 100dbc7a1d02e66ba3a3780d0077c6f131fd407a | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,035 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD08BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'CUX', MIN: 0, MAX: 9},
{ID: 'NAD', MIN: 0, MAX: 99, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'DOC', MIN: 0, MAX: 9999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'MOA', MIN: 0, MAX: 2},
{ID: 'AJT', MIN: 0, MAX: 9, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 9},
]},
{ID: 'INP', MIN: 0, MAX: 9, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 9},
]},
{ID: 'DLI', MIN: 0, MAX: 9999, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 2},
{ID: 'AJT', MIN: 0, MAX: 9, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 9},
]},
]},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"[email protected]"
] | |
dc83a61491aaa7474ca75f5417cccf54a741e2a2 | 1f7d287ef90041e20468513a26a39e1f3d221289 | /Level-2/s18/guvi-L2-s18-py06.py | 21f5db26fcc71bb3aa20c4faae14685578b46595 | [] | no_license | ksthacker/python | d787d69f954c0e9b59b0cc96a8b8fc5c0594d8a0 | 3a3775e1b9349e313f8c96ea11eade54a7e9bf54 | refs/heads/master | 2021-04-27T16:32:40.923316 | 2019-08-21T04:50:22 | 2019-08-21T04:50:22 | 122,303,461 | 0 | 17 | null | 2019-10-03T14:59:51 | 2018-02-21T07:09:32 | Python | UTF-8 | Python | false | false | 262 | py | import sys, string, math
s1,s2 = input().split()
dic1 = {}
dic2 = {}
for c in s1 :
dic1[c] = dic1.get(c,0) + 1
for c in s2 :
dic2[c] = dic2.get(c,0) + 1
#print(dic1,dic2)
if dic1 == dic2 :  # compare full character counts (anagram test), not just the key sets
print('true')
else :
print('false')
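# e.g. "listen silent" -> true ; "aab abb" -> false (same keys, different counts)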
| [
"[email protected]"
] | |
b41ba3a6a31fab6c7ed42dd38ecc6821786344d5 | a7685d315e6616cc2b6d43587bb19ead4324fb2a | /cci_mailchimp/wizard/extract_results_advertising.py | 4a5fc3227990c4926d43be7d42084fc091237cd3 | [] | no_license | philmervdm/modules_cci_odoo8 | 472ea68de409e876722413afdd873d6a7827744e | 603144219a86e805f7603cfafc0fb05a78166eef | refs/heads/master | 2021-01-09T20:02:58.326569 | 2017-05-06T15:45:03 | 2017-05-06T15:45:03 | 60,332,279 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 13,732 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Version 1.0 Philmer
# This wizard searches for email addresses in a list of given tables
import time
import datetime
import mailchimp  # needed below for mailchimp.Mailchimp / Campaigns / Reports
import base64
from xlwt import *
from openerp import models, fields, api, _
def french_date(string):
return string[8:10] + "/" + string[5:7] + '/' + string[0:4]
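# e.g. french_date('2015-03-31') == '31/03/2015' (expects an ISO yyyy-mm-dd prefix)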
class cci_mailchimp_results_adv(models.TransientModel):
_name = 'cci.mailchimp.results.adv'
url = fields.Char(string='URL of the advertising', help='You can give also only a part of the complete URL.\nAll URL containing this part will be taken into account.', required=True)
from_date = fields.Date(string='From', help='Date of the first Revue de Presse', required=True)
to_date = fields.Date(string='To', help='Date of the last Revue de Presse', required=True)
@api.multi
def extract(self):
# Initialisation of result file
wb1 = Workbook()
ws1 = wb1.add_sheet(u'Résultats')
ws1.write(0, 0, u'URL :')
ws1.write(1, 0, u'De :')
ws1.write(2, 0, u'à :')
ws1.write(3, 0, u'Seules les dates et les régions où l\'URL a été utilisée avec un tracking activé apparaissent ici.')
        ws1.write(0, 1, self.url)
ws1.write(1, 1, french_date(self.from_date))
ws1.write(2, 1, french_date(self.to_date))
        ws1.write(4, 0, u'Seules les revue de presse ayant au moins un résultat sont présentées ici')
ws1.write(5, 0, u'RDP envoyées :')
ws1.write(6, 0, u'Moyenne Uniques clicks/jour :')
ws1.write(9, 0, u'Date')
ws1.write(8, 1, u'Total')
ws1.write(9, 1, u'Envois')
ws1.write(9, 2, u'Ouverts')
ws1.write(9, 3, u'Ouv. uniques')
ws1.write(9, 4, u'Clicks')
ws1.write(9, 5, u'Cl. uniques')
ws1.write(9, 6, u'URL clicks')
ws1.write(9, 7, u'URL Cl. Uniques')
xls_line = 10
# Possibles revue de presse
rdps = {}
rdps[1] = {'name':u'- BW', 'count':0}
rdps[2] = {'name':u'- Hainaut', 'count':0}
rdps[3] = {'name':u'- Liège Verviers', 'count':0}
rdps[4] = {'name':u'- Namur', 'count':0}
rdps[5] = {'name':u'- Wapi', 'count':0}
rdps[6] = {'name':u'- VL', 'count':0}
rdps[7] = {'name':u'- Inconnue', 'count':0}
# connections to mailchimp
# We get the API key
parameter_obj = self.env['ir.config_parameter']
param_values = parameter_obj.get_param(['MailChimpAPIKey'])
        res_advert_xls = False  # default so the else branch below cannot hit a NameError
        if param_values.has_key('MailChimpAPIKey'):
mailchimp_server = mailchimp.Mailchimp(param_values['MailChimpAPIKey'], False)
mailchimp_campaigns = mailchimp.Campaigns(mailchimp_server)
mailchimp_reports = mailchimp.Reports(mailchimp_server)
start = 0
limit = 25
result = mailchimp_campaigns.list({'status':'sent'}, start, limit, 'send_time', 'DESC')
total = result['total']
url = self.url
date_from = self.from_date
date_to = self.to_date
final_count = 0
dates = []
dResults = {}
while start <= ((total / limit) + 1):
result = mailchimp_campaigns.list({'status':'sent'}, start, limit, 'send_time', 'DESC')
for line in result['data']:
if line['emails_sent'] and line['send_time'][0:10] >= date_from and line['send_time'][0:10] <= date_to:
if line['tracking']['html_clicks']:
result = mailchimp_reports.clicks(line['id'])
for click in result['total']:
if url in click['url']:
rdp_id = False
for (key, rdp) in rdps.items():
if rdp['name'] in line['title']:
rdp_id = key
break
if not rdp_id:
rdp_id = 6
rdps[rdp_id]['count'] += 1
resu = {
'date': line['send_time'][0:10],
'rdp_id': rdp_id,
'sendings': line['emails_sent'],
'opens': line['summary']['opens'],
'u_opens': line['summary']['unique_opens'],
'clicks': line['summary']['clicks'],
'u_clicks': line['summary']['unique_clicks'],
'url_clicks': click['clicks'],
'u_url_clicks': click['unique']
}
if line['send_time'][0:10] not in dates:
dates.append(line['send_time'][0:10])
if dResults.has_key((line['send_time'][0:10], rdp_id)):
# dResults[(line['send_time'][0:10],rdp_id)]['opens'] += line['summary']['opens']
# dResults[(line['send_time'][0:10],rdp_id)]['u_opens'] += line['summary']['unique_opens']
# dResults[(line['send_time'][0:10],rdp_id)]['clicks'] += line['summary']['clicks']
# dResults[(line['send_time'][0:10],rdp_id)]['u_clicks'] += line['summary']['unique_clicks']
dResults[(line['send_time'][0:10], rdp_id)]['url_clicks'] += click['clicks']
dResults[(line['send_time'][0:10], rdp_id)]['u_url_clicks'] += click['unique']
else:
dResults[(line['send_time'][0:10], rdp_id)] = resu
final_count += 1
start += 1
final_rdps = []
for (key, rdp) in rdps.items():
if rdp['count'] > 0:
final_rdps.append(key)
final_rdps.sort()
index = 1
for rdp_id in final_rdps:
ws1.write(8, (index * 7) + 1, rdps[rdp_id]['name'][2:])
ws1.write(9, (index * 7) + 1, u'Envois')
ws1.write(9, (index * 7) + 2, u'Ouverts')
ws1.write(9, (index * 7) + 3, u'Ouv. uniques')
ws1.write(9, (index * 7) + 4, u'Clicks')
ws1.write(9, (index * 7) + 5, u'Cl. uniques')
ws1.write(9, (index * 7) + 6, u'URL clicks')
ws1.write(9, (index * 7) + 7, u'URL Cl. Uniques')
index += 1
dates.sort(reverse=True)
final_results = {}
full_list = [0, ]
full_list.extend(final_rdps)
for rdp_id in full_list: # ## 0 will serve for totals
final_results[rdp_id] = {'sendings':0, 'opens':0, 'u_opens':0, 'clicks':0, 'u_clicks':0, 'url_clicks':0, 'u_url_clicks':0}
for date in dates:
ws1.write(xls_line, 0, french_date(date))
index = 1
total_sendings = 0
total_opens = 0
total_u_opens = 0
total_clicks = 0
total_u_clicks = 0
total_url_clicks = 0
total_u_url_clicks = 0
for rdp_id in final_rdps:
if dResults.has_key((date, rdp_id)):
ws1.write(xls_line, (index * 7) + 1, dResults[(date, rdp_id)]['sendings'])
ws1.write(xls_line, (index * 7) + 2, dResults[(date, rdp_id)]['opens'])
ws1.write(xls_line, (index * 7) + 3, dResults[(date, rdp_id)]['u_opens'])
ws1.write(xls_line, (index * 7) + 4, dResults[(date, rdp_id)]['clicks'])
ws1.write(xls_line, (index * 7) + 5, dResults[(date, rdp_id)]['u_clicks'])
ws1.write(xls_line, (index * 7) + 6, dResults[(date, rdp_id)]['url_clicks'])
ws1.write(xls_line, (index * 7) + 7, dResults[(date, rdp_id)]['u_url_clicks'])
total_sendings += dResults[(date, rdp_id)]['sendings']
total_opens += dResults[(date, rdp_id)]['opens']
total_u_opens += dResults[(date, rdp_id)]['u_opens']
total_clicks += dResults[(date, rdp_id)]['clicks']
total_u_clicks += dResults[(date, rdp_id)]['u_clicks']
total_url_clicks += dResults[(date, rdp_id)]['url_clicks']
total_u_url_clicks += dResults[(date, rdp_id)]['u_url_clicks']
# cumulates for final line
for col in [0, rdp_id]:
for column_name in ['sendings', 'opens', 'u_opens', 'clicks', 'u_clicks', 'url_clicks', 'u_url_clicks']:
final_results[col][column_name] += dResults[(date, rdp_id)][column_name]
else:
ws1.write(xls_line, (index * 7) + 1, 0)
ws1.write(xls_line, (index * 7) + 2, 0)
ws1.write(xls_line, (index * 7) + 3, 0)
ws1.write(xls_line, (index * 7) + 4, 0)
ws1.write(xls_line, (index * 7) + 5, 0)
ws1.write(xls_line, (index * 7) + 6, 0)
ws1.write(xls_line, (index * 7) + 7, 0)
index += 1
ws1.write(xls_line, 1, total_sendings)
ws1.write(xls_line, 2, total_opens)
ws1.write(xls_line, 3, total_u_opens)
ws1.write(xls_line, 4, total_clicks)
ws1.write(xls_line, 5, total_u_clicks)
ws1.write(xls_line, 6, total_url_clicks)
ws1.write(xls_line, 7, total_u_url_clicks)
xls_line += 1
ws1.write(5, 1, final_count)
ws1.write(6, 1, '%.2f' % ((final_results[0]['u_url_clicks'] * 1.0) / len(dates)))
ws1.write(xls_line, 0, u'Totaux')
index = 0
for rdp_id in full_list:
ws1.write(xls_line, (index * 7) + 1, final_results[rdp_id]['sendings'])
ws1.write(xls_line, (index * 7) + 2, final_results[rdp_id]['opens'])
ws1.write(xls_line, (index * 7) + 3, final_results[rdp_id]['u_opens'])
ws1.write(xls_line, (index * 7) + 4, final_results[rdp_id]['clicks'])
ws1.write(xls_line, (index * 7) + 5, final_results[rdp_id]['u_clicks'])
ws1.write(xls_line, (index * 7) + 6, final_results[rdp_id]['url_clicks'])
ws1.write(xls_line, (index * 7) + 7, final_results[rdp_id]['u_url_clicks'])
index += 1
# save the final result file
wb1.save('res_advert.xls')
result_file = open('res_advert.xls', 'rb').read()
# give the result to the user
msg = u'Save the File with \'.xls\' extension.'
res_advert_xls = base64.encodestring(result_file)
else:
msg = u'The parameter MailChimpAPIKey is missing !\nImpossible to extract something.'
ctx = self.env.context.copy()
ctx.update({'msg': msg, 'res_advert_xls': res_advert_xls})
view = self.env.ref('cci_mailchimp.view_cci_mailchimp_results_adv_msg')
return {
'name': _('Notification'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'cci.mailchimp.results.adv.msg',
'views': [(view.id, 'form')],
'view_id': view.id,
'target': 'new',
'context': ctx,
}
class cci_mailchimp_results_adv_msg(models.TransientModel):
_name = 'cci.mailchimp.results.adv.msg'
msg = fields.Text(string='File created', readonly=True)
res_advert_xls = fields.Binary(string='Prepared file', readonly=True)
@api.model
def default_get(self, fields):
rec = super(cci_mailchimp_results_adv_msg, self).default_get(fields)
if self.env.context.get('msg'):
rec['msg'] = self.env.context['msg']
if self.env.context.get('res_advert_xls'):
rec['res_advert_xls'] = self.env.context['res_advert_xls']
return rec
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
dafbc6f303cb54a88a957ad7b6ca81fe72a58dcf | 82fce9aae9e855a73f4e92d750e6a8df2ef877a5 | /Lab/venv/lib/python3.8/site-packages/OpenGL/raw/GL/VERSION/GL_4_0.py | e71cd5aca4e802fb6dbe967d20d16fbe9ccabd69 | [] | no_license | BartoszRudnik/GK | 1294f7708902e867dacd7da591b9f2e741bfe9e5 | 6dc09184a3af07143b9729e42a6f62f13da50128 | refs/heads/main | 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,207 | py | '''Autogenerated by xml_generate script, do not edit!'''
import ctypes
from OpenGL import platform as _p, arrays
from OpenGL.constant import Constant as _C
# End users want this...
from OpenGL.raw.GL import _errors
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
_EXTENSION_NAME = 'GL_VERSION_GL_4_0'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GL, 'GL_VERSION_GL_4_0', error_checker=_errors._error_checker)
GL_ACTIVE_SUBROUTINES = _C('GL_ACTIVE_SUBROUTINES', 0x8DE5)
GL_ACTIVE_SUBROUTINE_MAX_LENGTH = _C('GL_ACTIVE_SUBROUTINE_MAX_LENGTH', 0x8E48)
GL_ACTIVE_SUBROUTINE_UNIFORMS = _C('GL_ACTIVE_SUBROUTINE_UNIFORMS', 0x8DE6)
GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS = _C('GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS', 0x8E47)
GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH = _C('GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH', 0x8E49)
GL_COMPATIBLE_SUBROUTINES = _C('GL_COMPATIBLE_SUBROUTINES', 0x8E4B)
GL_DOUBLE_MAT2=_C('GL_DOUBLE_MAT2',0x8F46)
GL_DOUBLE_MAT2x3=_C('GL_DOUBLE_MAT2x3',0x8F49)
GL_DOUBLE_MAT2x4=_C('GL_DOUBLE_MAT2x4',0x8F4A)
GL_DOUBLE_MAT3=_C('GL_DOUBLE_MAT3',0x8F47)
GL_DOUBLE_MAT3x2=_C('GL_DOUBLE_MAT3x2',0x8F4B)
GL_DOUBLE_MAT3x4=_C('GL_DOUBLE_MAT3x4',0x8F4C)
GL_DOUBLE_MAT4=_C('GL_DOUBLE_MAT4',0x8F48)
GL_DOUBLE_MAT4x2=_C('GL_DOUBLE_MAT4x2',0x8F4D)
GL_DOUBLE_MAT4x3=_C('GL_DOUBLE_MAT4x3',0x8F4E)
GL_DOUBLE_VEC2=_C('GL_DOUBLE_VEC2',0x8FFC)
GL_DOUBLE_VEC3=_C('GL_DOUBLE_VEC3',0x8FFD)
GL_DOUBLE_VEC4=_C('GL_DOUBLE_VEC4',0x8FFE)
GL_DRAW_INDIRECT_BUFFER=_C('GL_DRAW_INDIRECT_BUFFER',0x8F3F)
GL_DRAW_INDIRECT_BUFFER_BINDING=_C('GL_DRAW_INDIRECT_BUFFER_BINDING',0x8F43)
GL_FRACTIONAL_EVEN=_C('GL_FRACTIONAL_EVEN',0x8E7C)
GL_FRACTIONAL_ODD=_C('GL_FRACTIONAL_ODD',0x8E7B)
GL_FRAGMENT_INTERPOLATION_OFFSET_BITS=_C('GL_FRAGMENT_INTERPOLATION_OFFSET_BITS',0x8E5D)
GL_GEOMETRY_SHADER_INVOCATIONS=_C('GL_GEOMETRY_SHADER_INVOCATIONS',0x887F)
GL_INT_SAMPLER_CUBE_MAP_ARRAY=_C('GL_INT_SAMPLER_CUBE_MAP_ARRAY',0x900E)
GL_ISOLINES=_C('GL_ISOLINES',0x8E7A)
GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS=_C('GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS',0x8E1E)
GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS=_C('GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS',0x8E1F)
GL_MAX_FRAGMENT_INTERPOLATION_OFFSET=_C('GL_MAX_FRAGMENT_INTERPOLATION_OFFSET',0x8E5C)
GL_MAX_GEOMETRY_SHADER_INVOCATIONS=_C('GL_MAX_GEOMETRY_SHADER_INVOCATIONS',0x8E5A)
GL_MAX_PATCH_VERTICES=_C('GL_MAX_PATCH_VERTICES',0x8E7D)
GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET=_C('GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET',0x8E5F)
GL_MAX_SUBROUTINES=_C('GL_MAX_SUBROUTINES',0x8DE7)
GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS=_C('GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS',0x8DE8)
GL_MAX_TESS_CONTROL_INPUT_COMPONENTS=_C('GL_MAX_TESS_CONTROL_INPUT_COMPONENTS',0x886C)
GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS=_C('GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS',0x8E83)
GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS=_C('GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS',0x8E81)
GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS=_C('GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS',0x8E85)
GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS=_C('GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS',0x8E89)
GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS=_C('GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS',0x8E7F)
GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS=_C('GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS',0x886D)
GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS=_C('GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS',0x8E86)
GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS=_C('GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS',0x8E82)
GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS=_C('GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS',0x8E8A)
GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS=_C('GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS',0x8E80)
GL_MAX_TESS_GEN_LEVEL=_C('GL_MAX_TESS_GEN_LEVEL',0x8E7E)
GL_MAX_TESS_PATCH_COMPONENTS=_C('GL_MAX_TESS_PATCH_COMPONENTS',0x8E84)
GL_MAX_TRANSFORM_FEEDBACK_BUFFERS=_C('GL_MAX_TRANSFORM_FEEDBACK_BUFFERS',0x8E70)
GL_MAX_VERTEX_STREAMS=_C('GL_MAX_VERTEX_STREAMS',0x8E71)
GL_MAX_VERTEX_STREAMS=_C('GL_MAX_VERTEX_STREAMS',0x8E71)
GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET=_C('GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET',0x8E5E)
GL_MIN_SAMPLE_SHADING_VALUE=_C('GL_MIN_SAMPLE_SHADING_VALUE',0x8C37)
GL_NUM_COMPATIBLE_SUBROUTINES=_C('GL_NUM_COMPATIBLE_SUBROUTINES',0x8E4A)
GL_PATCHES=_C('GL_PATCHES',0x000E)
GL_PATCH_DEFAULT_INNER_LEVEL=_C('GL_PATCH_DEFAULT_INNER_LEVEL',0x8E73)
GL_PATCH_DEFAULT_OUTER_LEVEL=_C('GL_PATCH_DEFAULT_OUTER_LEVEL',0x8E74)
GL_PATCH_VERTICES=_C('GL_PATCH_VERTICES',0x8E72)
GL_PROXY_TEXTURE_CUBE_MAP_ARRAY=_C('GL_PROXY_TEXTURE_CUBE_MAP_ARRAY',0x900B)
GL_QUADS=_C('GL_QUADS',0x0007)
GL_SAMPLER_CUBE_MAP_ARRAY=_C('GL_SAMPLER_CUBE_MAP_ARRAY',0x900C)
GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW=_C('GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW',0x900D)
GL_SAMPLE_SHADING=_C('GL_SAMPLE_SHADING',0x8C36)
GL_TESS_CONTROL_OUTPUT_VERTICES=_C('GL_TESS_CONTROL_OUTPUT_VERTICES',0x8E75)
GL_TESS_CONTROL_SHADER=_C('GL_TESS_CONTROL_SHADER',0x8E88)
GL_TESS_EVALUATION_SHADER=_C('GL_TESS_EVALUATION_SHADER',0x8E87)
GL_TESS_GEN_MODE=_C('GL_TESS_GEN_MODE',0x8E76)
GL_TESS_GEN_POINT_MODE=_C('GL_TESS_GEN_POINT_MODE',0x8E79)
GL_TESS_GEN_SPACING=_C('GL_TESS_GEN_SPACING',0x8E77)
GL_TESS_GEN_VERTEX_ORDER=_C('GL_TESS_GEN_VERTEX_ORDER',0x8E78)
GL_TEXTURE_BINDING_CUBE_MAP_ARRAY=_C('GL_TEXTURE_BINDING_CUBE_MAP_ARRAY',0x900A)
GL_TEXTURE_CUBE_MAP_ARRAY=_C('GL_TEXTURE_CUBE_MAP_ARRAY',0x9009)
GL_TRANSFORM_FEEDBACK=_C('GL_TRANSFORM_FEEDBACK',0x8E22)
GL_TRANSFORM_FEEDBACK_BINDING=_C('GL_TRANSFORM_FEEDBACK_BINDING',0x8E25)
GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE=_C('GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE',0x8E24)
GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED=_C('GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED',0x8E23)
GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER=_C('GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER',0x84F0)
GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER=_C('GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER',0x84F1)
GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY=_C('GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY',0x900F)
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint)
def glBeginQueryIndexed(target,index,id):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint)
def glBindTransformFeedback(target,id):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLenum)
def glBlendEquationSeparatei(buf,modeRGB,modeAlpha):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum)
def glBlendEquationi(buf,mode):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLenum,_cs.GLenum,_cs.GLenum)
def glBlendFuncSeparatei(buf,srcRGB,dstRGB,srcAlpha,dstAlpha):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLenum)
def glBlendFunci(buf,src,dst):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glDeleteTransformFeedbacks(n,ids):pass
@_f
@_p.types(None,_cs.GLenum,ctypes.c_void_p)
def glDrawArraysIndirect(mode,indirect):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glDrawElementsIndirect(mode,type,indirect):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint)
def glDrawTransformFeedback(mode,id):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint)
def glDrawTransformFeedbackStream(mode,id,stream):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint)
def glEndQueryIndexed(target,index):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glGenTransformFeedbacks(n,ids):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetActiveSubroutineName(program,shadertype,index,bufsize,length,name):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetActiveSubroutineUniformName(program,shadertype,index,bufsize,length,name):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetActiveSubroutineUniformiv(program,shadertype,index,pname,values):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLenum,arrays.GLintArray)
def glGetProgramStageiv(program,shadertype,pname,values):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLenum,arrays.GLintArray)
def glGetQueryIndexediv(target,index,pname,params):pass
@_f
@_p.types(_cs.GLuint,_cs.GLuint,_cs.GLenum,arrays.GLcharArray)
def glGetSubroutineIndex(program,shadertype,name):pass
@_f
@_p.types(_cs.GLint,_cs.GLuint,_cs.GLenum,arrays.GLcharArray)
def glGetSubroutineUniformLocation(program,shadertype,name):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,arrays.GLuintArray)
def glGetUniformSubroutineuiv(shadertype,location,params):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLint,arrays.GLdoubleArray)
def glGetUniformdv(program,location,params):pass
@_f
@_p.types(_cs.GLboolean,_cs.GLuint)
def glIsTransformFeedback(id):pass
@_f
@_p.types(None,_cs.GLfloat)
def glMinSampleShading(value):pass
@_f
@_p.types(None,_cs.GLenum,arrays.GLfloatArray)
def glPatchParameterfv(pname,values):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint)
def glPatchParameteri(pname,value):pass
@_f
@_p.types(None,)
def glPauseTransformFeedback():pass
@_f
@_p.types(None,)
def glResumeTransformFeedback():pass
@_f
@_p.types(None,_cs.GLint,_cs.GLdouble)
def glUniform1d(location,x):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,arrays.GLdoubleArray)
def glUniform1dv(location,count,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLdouble,_cs.GLdouble)
def glUniform2d(location,x,y):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,arrays.GLdoubleArray)
def glUniform2dv(location,count,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLdouble,_cs.GLdouble,_cs.GLdouble)
def glUniform3d(location,x,y,z):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,arrays.GLdoubleArray)
def glUniform3dv(location,count,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLdouble,_cs.GLdouble,_cs.GLdouble,_cs.GLdouble)
def glUniform4d(location,x,y,z,w):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,arrays.GLdoubleArray)
def glUniform4dv(location,count,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLdoubleArray)
def glUniformMatrix2dv(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLdoubleArray)
def glUniformMatrix2x3dv(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLdoubleArray)
def glUniformMatrix2x4dv(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLdoubleArray)
def glUniformMatrix3dv(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLdoubleArray)
def glUniformMatrix3x2dv(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLdoubleArray)
def glUniformMatrix3x4dv(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLdoubleArray)
def glUniformMatrix4dv(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLdoubleArray)
def glUniformMatrix4x2dv(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLint,_cs.GLsizei,_cs.GLboolean,arrays.GLdoubleArray)
def glUniformMatrix4x3dv(location,count,transpose,value):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,arrays.GLuintArray)
def glUniformSubroutinesuiv(shadertype,count,indices):pass
| [
"[email protected]"
] | |
9deb933676e6f8022a6d68c76123cb045891c191 | 672d535e7586289873fd5167bd9888383aff02e4 | /src/chapter_10/file_writer.py | 05d3024a8efc186caf79a788cbcce915c16e552a | [] | no_license | CatchTheDog/python_crash_course | 889fd151323e48dfbc6754b86132aad3f1d37856 | 148eda5eb96c501060115aad0bfd6e15ccd04c37 | refs/heads/master | 2020-03-29T22:06:32.015096 | 2018-09-29T02:36:51 | 2018-09-29T02:36:51 | 150,402,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | file_name = "..\\test_files\\programming.txt"
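# Mode 'a' appends, so re-running this script keeps adding the two lines below.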
with open(file_name,'a') as file_object:
file_object.write("I love programming!\n")
file_object.write("Yes,you are!\n") | [
"[email protected]"
] | |
d6938327087879a9f4238e51bda9ed19d1500dd6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03261/s340412788.py | 5a6e7ab12a4f437b7b0e143245721a1a5d7f2d38 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | def resolve():
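    # Shiritori check: all N words must be pairwise distinct and each word
    # must start with the last character of the previous word; prints 'Yes'/'No'.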
N = int(input())
W = [input() for _ in range(N)]
if len(set(W)) != N:
print('No')
return
for i in range(N-1):
if W[i][-1] != W[i+1][0]:
print('No')
return
print('Yes')
return
resolve()
| [
"[email protected]"
] | |
13fb7bd55c6b4c3bcf6151f1231cd06bed96325b | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.12.20/5/1569572839.py | b804973202990b201d8fa526eab461fdb643c04b | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | import functools
import typing
import string
import random
import pytest
## Solution part 1.
def mysum(zs : list) -> int:
    return sum(zs)
## Solution part 2. (Tests)
def test_2():
assert mysum([1,2,3]) == 6
######################################################################
| [
"[email protected]"
] | |
7bb0b9509db055c53732ac141dd5e0394c6ef70b | 2c3f13857d4a915410de5ac9547745eb2769db5f | /eval/e43/compare_text.py | 87358093ffb7d43d225e9232d6c758060c29fa51 | [] | no_license | andrewhead/StackSkim | 43a4cf769645bb70202075f8077fa4d5d7be2a4b | 9ac11705ff82aa978d1a87177059e665f4e5ebef | refs/heads/master | 2020-06-03T16:15:15.127268 | 2016-01-16T17:16:36 | 2016-01-16T17:16:36 | 50,692,945 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import codecs
logging.basicConfig(level=logging.INFO, format="%(message)s")
def main():
with codecs.open('javascript_text.txt', 'r', 'utf-8') as js_file:
with codecs.open('beautifulsoup_text.txt', 'r', 'utf-8') as bs_file:
js_text = js_file.read()
bs_text = bs_file.read()
for i in range(min([len(js_text), len(bs_text)])):
if js_text[i] != bs_text[i]:
print "==== Mismatch at index ====", i
print "Javascript: ", repr(js_text[i])
print "BeautifulSoup: ", repr(bs_text[i])
print "* Javascript before: "
print repr(js_text[i-50:i])
print "* BeautifulSoup before: "
print repr(bs_text[i-50:i])
print "* Javascript after: "
print repr(js_text[i:i+50])
print "* BeautifulSoup after: "
print repr(bs_text[i:i+50])
break
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
0516724e956d3a03c966ac81dd33f50bb9a93f14 | f019ca1e4029b4077472087d1b677052583c0392 | /qa/rpc-tests/keypool.py | aeaa4435c0d9ce9ede5ce333dced5cf31e78ffc9 | [
"MIT"
] | permissive | mirzaei-ce/core-civilbit | 9204dd9c4c3ce04f867105da4e7fa9a56af1f8ba | cab3e53bdc6b04a84f4bc48114efc07865be814a | refs/heads/master | 2021-04-26T05:03:32.282526 | 2017-10-16T15:39:44 | 2017-10-16T15:39:44 | 107,148,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,338 | py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the wallet keypool, and interaction with wallet encryption/locking
# Add python-civilbitrpc to module search path:
from test_framework.test_framework import CivilbitTestFramework
from test_framework.util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
class KeyPoolTest(CivilbitTestFramework):
def run_test(self):
nodes = self.nodes
# Encrypt wallet and wait to terminate
nodes[0].encryptwallet('test')
civilbitd_processes[0].wait()
# Restart node 0
nodes[0] = start_node(0, self.options.tmpdir)
# Keep creating keys
addr = nodes[0].getnewaddress()
try:
addr = nodes[0].getnewaddress()
raise AssertionError('Keypool should be exhausted after one address')
except JSONRPCException,e:
assert(e.error['code']==-12)
# put three new keys in the keypool
nodes[0].walletpassphrase('test', 12000)
nodes[0].keypoolrefill(3)
nodes[0].walletlock()
# drain the keys
addr = set()
addr.add(nodes[0].getrawchangeaddress())
addr.add(nodes[0].getrawchangeaddress())
addr.add(nodes[0].getrawchangeaddress())
addr.add(nodes[0].getrawchangeaddress())
# assert that four unique addresses were returned
assert(len(addr) == 4)
# the next one should fail
try:
addr = nodes[0].getrawchangeaddress()
raise AssertionError('Keypool should be exhausted after three addresses')
except JSONRPCException,e:
assert(e.error['code']==-12)
# refill keypool with three new addresses
nodes[0].walletpassphrase('test', 12000)
nodes[0].keypoolrefill(3)
nodes[0].walletlock()
# drain them by mining
nodes[0].generate(1)
nodes[0].generate(1)
nodes[0].generate(1)
nodes[0].generate(1)
try:
nodes[0].generate(1)
        raise AssertionError('Keypool should be exhausted after three addresses')
except JSONRPCException,e:
assert(e.error['code']==-12)
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir)
if __name__ == '__main__':
KeyPoolTest().main()
| [
"[email protected]"
] | |
e842a0b0122230b18a59c01e0b2b0561a33e8a9a | 6a33cb94d4af1d8a7329ddc6c9d42f870c35bb2f | /python/euler24.py | f4dfa43b28627a2b567d1d09688bcc6fd94d3b85 | [] | no_license | vochong/project-euler | 836321cc8e7d2e7cdf22b3b136d44dcba74a8701 | 6a0c7103861ff825bf84800b6e2e62819a41e36d | refs/heads/master | 2020-04-29T10:41:48.487159 | 2018-09-19T00:13:34 | 2018-09-19T00:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | from itertools import permutations
def euler24():
"""
A permutation is an ordered arrangement of objects. For example, 3124 is
one possible permutation of the digits 1, 2, 3 and 4. If all of the
permutations are listed numerically or alphabetically, we call it
lexicographic order. The lexicographic permutations of 0, 1 and 2 are:
012 021 102 120 201 210
What is the millionth lexicographic permutation of the digits 0, 1, 2, 3,
4, 5, 6, 7, 8 and 9?
"""
perms = [''.join(p) for p in permutations("0123456789")]
return perms[999999]
if __name__ == "__main__":
print euler24()
| [
"[email protected]"
] | |
eaa9e3662e78f9f1fb39c52cf1e2e525f22310eb | a169199ebb9f4a7b81cd00ff23af50aeb591ffe4 | /clpy/types/mutabledict.py | 8b4c40a0ef9fc6e1b78077e8aad89ceb47cf9954 | [] | no_license | zielmicha/clojure-pypy | b2eab9437997e4c250d455b3a6d5f4c036855cdf | 41c8f14a19173c1b5452bcdb1f7f6df23e6cdecf | refs/heads/master | 2021-01-23T12:18:45.583076 | 2014-02-21T19:22:30 | 2014-02-21T19:22:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | from clpy.types.root import Root
from clpy.types.dict import PersistentHashTrie
class MutableDict(Root):
'''
Mutable dictionary that should be based on PyPy's implementation,
but I can't get it accepted by translator.
So, for now, it's based on persistent hash trie.
'''
def __init__(self, space):
self.container = PersistentHashTrie(space)
def repr(self):
return 'MutableDict{%s}' % ', '.join([
'%s: %s' % (k.repr(), self.container.get_item(k).repr())
for k in self.container.keys() ])
def set_item(self, key, val):
self.container = self.container.assoc(key, val)
def get_item(self, key):
return self.container.get_item(key)
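# Hypothetical usage sketch (assumes an interpreter `space` object and wrapped
# key/value objects compatible with PersistentHashTrie; not executed here):
#   d = MutableDict(space)
#   d.set_item(w_key, w_val)
#   assert d.get_item(w_key) is w_val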
| [
"[email protected]"
] | |
4060361d56fabf2289f5fa2ad768cbe5b277c2eb | d053d7f98c36bc0d0378dcbb02d6686ddf37b2da | /yabgp/agent/__init__.py | 99d77ca9c1f41738f6ed80b147d48327fa595a4c | [
"Apache-2.0"
] | permissive | c0ns0le/yabgp | 51f79006735462a59115b775664d1f94a2399c85 | 19c6462f313dab5548179bc6009375bbc039d0f0 | refs/heads/master | 2021-01-17T21:22:52.368843 | 2016-02-22T02:08:34 | 2016-02-22T02:08:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,475 | py | # Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import os
import logging
import contextlib
import traceback
from oslo_config import cfg
from twisted.internet import reactor
from twisted.web.server import Site
from yabgp import version, log
from yabgp.core.factory import BGPPeering
from yabgp.config import get_bgp_config
from yabgp.common import constants as bgp_cons
from yabgp.api.app import app
from yabgp.channel.config import rabbit_mq
from yabgp.channel.config import channle_filter
from yabgp.channel.factory import PikaFactory
from yabgp.db import config as db_config
from yabgp.db.mongodb import MongoApi
from yabgp.db import constants as db_cons
log.early_init_log(logging.DEBUG)
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@contextlib.contextmanager
def mongo_operation(mongo_conn, connection_name):
mongo_conn.collection_name = connection_name
db = mongo_conn.get_collection()
yield db
mongo_conn._close_db()
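# Minimal usage sketch (the collection constant here is an assumed example):
#   with mongo_operation(mongo_api, db_cons.MONGO_COLLECTION_BGP_AGENT) as db:
#       doc = db.find_one({'peer_ip': peer_ip})
# the helper rebinds the collection, yields it and closes the db afterwards.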
def load_channel_filter_from_db(peer_ip, mongo_api):
"""
load rabbitmq channle filter from mongodb
:return:
"""
LOG.info('try to load yabgp rabbitmq channel filter from mongodb')
mongo_api.collection_name = db_cons.MONGO_COLLECTION_RABBIT_CHANNEL_FILTER
try:
filter_list = mongo_api.get_collection().find({'peer_ip': peer_ip})
for item in filter_list:
if item['value'] not in CONF.rabbit_mq.filter[item['type']]:
CONF.rabbit_mq.filter[item['type']][item['value']] = None
except Exception as e:
LOG.debug(traceback.format_exc())
LOG.error('load failed, %s', e)
sys.exit()
def load_bgp_policy_from_db(mongo_conn, connection_name):
"""
load bgp policy from mongodb
:return:
"""
pass
def check_running_mode():
"""
before start the bgp peering, we should check the running mode
:return:
"""
if not CONF.standalone:
        # not running standalone: wire up the message-queue and database integrations
CONF.register_opts(rabbit_mq, group='rabbit_mq')
CONF.register_opts(channle_filter, group='rabbit_mq')
db_config.register_options()
def check_msg_config():
LOG.info('Check configurations about message process')
if CONF.message.write_disk:
if not os.path.exists(CONF.message.write_dir):
os.makedirs(CONF.message.write_dir)
LOG.info('Create dir %s', CONF.message.write_dir)
CONF.message.write_msg_max_size = CONF.message.write_msg_max_size * 1024 * 1024
def register_to_db(peer_ip, mongo_api):
"""
register peer configuration to database
:return:
"""
LOG.info('try to register yabgp agent to database')
peer_config = {
'_id': '%s:%s:%s' % (CONF.rest.bind_host, CONF.rest.bind_port, peer_ip),
'peer_ip': peer_ip,
'bind_host': CONF.rest.bind_host,
'bind_port': CONF.rest.bind_port,
'local_as': CONF.bgp.running_config[peer_ip]['local_as'],
'local_addr': CONF.bgp.running_config[peer_ip]['local_addr'],
'remote_as': CONF.bgp.running_config[peer_ip]['remote_as'],
'remote_addr': CONF.bgp.running_config[peer_ip]['remote_addr'],
'afi_safi': CONF.bgp.afi_safi,
'tag': CONF.bgp.running_config[peer_ip]['tag']
}
mongo_api.collection_name = db_cons.MONGO_COLLECTION_BGP_AGENT
try:
mongo_api.get_collection().save(peer_config)
except Exception as e:
LOG.debug(traceback.format_exc())
LOG.error('register failed, %s', e)
sys.exit()
def prepare_twisted_service():
LOG.info('Prepare twisted services')
# check all peers
all_peers = {}
# check running mode
if not CONF.standalone:
# rabbitmq factory
rabbit_mq_factory = PikaFactory(
host=CONF.rabbit_mq.rabbit_host,
port=CONF.rabbit_mq.rabbit_port,
userid=CONF.rabbit_mq.rabbit_userid,
password=CONF.rabbit_mq.rabbit_password
)
rabbit_mq_factory.peer_list = CONF.bgp.running_config.keys()
rabbit_mq_factory.connect()
# mongodb connection
if CONF.database.use_replica:
mongo_connection = MongoApi(
connection_url=CONF.database.connection,
db_name=CONF.database.dbname,
use_replica=CONF.database.use_replica,
replica_name=CONF.database.replica_name,
read_preference=CONF.database.read_preference,
write_concern=CONF.database.write_concern,
w_timeout=CONF.database.write_concern_timeout
)
else:
mongo_connection = MongoApi(connection_url=CONF.database.connection, db_name=CONF.database.dbname)
# check api bind host
if CONF.rest.bind_host == '0.0.0.0':
LOG.error('please use the exactly rest host ip address when not running in standalone mode')
sys.exit()
# TODO load channel filter and peer policy
else:
rabbit_mq_factory = None
mongo_connection = None
for peer in CONF.bgp.running_config:
LOG.info('Get peer %s configuration', peer)
if not CONF.standalone:
if CONF.bgp.running_config[peer]['local_addr'] == '0.0.0.0':
LOG.error('please use the exactly local bgp ip address when not running in standalone mode')
sys.exit()
if CONF.message.write_disk:
msg_file_path_for_peer = os.path.join(
CONF.message.write_dir,
peer.lower()
)
if not os.path.exists(msg_file_path_for_peer):
os.makedirs(msg_file_path_for_peer)
LOG.info('Create dir %s for peer %s', msg_file_path_for_peer, peer)
LOG.info('BGP message file path is %s', msg_file_path_for_peer)
else:
msg_file_path_for_peer = None
LOG.info('Create BGPPeering instance')
afi_safi_list = [bgp_cons.AFI_SAFI_STR_DICT[afi_safi]
for afi_safi in CONF.bgp.running_config[peer]['afi_safi']]
CONF.bgp.running_config[peer]['afi_safi'] = afi_safi_list
CONF.bgp.running_config[peer]['capability']['local']['afi_safi'] = afi_safi_list
bgp_peering = BGPPeering(
myasn=CONF.bgp.running_config[peer]['local_as'],
myaddr=CONF.bgp.running_config[peer]['local_addr'],
peerasn=CONF.bgp.running_config[peer]['remote_as'],
peeraddr=CONF.bgp.running_config[peer]['remote_addr'],
tag=CONF.bgp.running_config[peer]['tag'],
afisafi=CONF.bgp.running_config[peer]['afi_safi'],
msgpath=msg_file_path_for_peer,
md5=CONF.bgp.running_config[peer]['md5'],
channel=rabbit_mq_factory,
mongo_conn=mongo_connection
)
all_peers[peer] = bgp_peering
CONF.bgp.running_config[peer]['factory'] = bgp_peering
# register to database and check agent role
if not CONF.standalone:
register_to_db(peer_ip=peer, mongo_api=mongo_connection)
if not CONF.bgp.running_config[peer]['tag']:
                LOG.error('Please specify the role tag (SRC, DST or BOTH) when not running in standalone mode')
sys.exit()
load_channel_filter_from_db(peer_ip=peer, mongo_api=mongo_connection)
# Starting api server
if sys.version_info[0] == 2:
from twisted.web.wsgi import WSGIResource
LOG.info("Prepare RESTAPI service")
resource = WSGIResource(reactor, reactor.getThreadPool(), app)
site = Site(resource)
try:
reactor.listenTCP(CONF.rest.bind_port, site, interface=CONF.rest.bind_host)
LOG.info("serving RESTAPI on http://%s:%s", CONF.rest.bind_host, CONF.rest.bind_port)
except Exception as e:
LOG.error(e, exc_info=True)
sys.exit()
for peer in all_peers:
LOG.info('start peer, peer address=%s', peer)
all_peers[peer].automatic_start()
reactor.run()
def prepare_service(args=None):
try:
CONF(args=args, project='yabgp', version=version,
default_config_files=['/etc/yabgp/yabgp.ini'])
except cfg.ConfigFilesNotFoundError:
CONF(args=args, project='yabgp', version=version)
check_running_mode()
log.init_log()
LOG.info('Log (Re)opened.')
LOG.info("Configuration:")
cfg.CONF.log_opt_values(LOG, logging.INFO)
try:
get_bgp_config()
check_msg_config()
except Exception as e:
LOG.error(e)
LOG.debug(traceback.format_exc())
sys.exit()
LOG.info('Starting server in PID %s' % os.getpid())
# write pid file
if CONF.pid_file:
with open(CONF.pid_file, 'w') as pid_file:
pid_file.write(str(os.getpid()))
LOG.info('create pid file: %s' % CONF.pid_file)
prepare_twisted_service()
| [
"[email protected]"
] | |
2795c6648d7f1266b7f42b4e91725d9f27268dea | b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e | /pyedit/pyedit-036/pyedlib/pedfind.py | 3ac9395fb86b64616c888b4a839f6c126b4421a4 | [] | no_license | pglen/pgpygtk | 4d1405478a714f003984cf3e3db04ff1f767470b | 33f58010e304f1a312f2356de453ecedb7aa21ef | refs/heads/master | 2021-01-22T01:18:52.238415 | 2019-01-01T01:37:24 | 2019-01-01T01:37:24 | 102,215,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,150 | py | #!/usr/bin/env python
# Action Handler for find
import re, string, gtk, glib, gobject, warnings
import peddoc, pedync, pedconfig
from pedutil import *
from pedundo import *
strhist = []
stridx = 0
myentry = None
# -------------------------------------------------------------------------
def find(self, self2, replace = False):
global myentry
self.reptxt = ""
if replace:
head = "pyedit: Find / Replace"
else:
head = "pyedit: Find in text"
dialog = gtk.Dialog(head,
None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
dialog.set_default_response(gtk.RESPONSE_ACCEPT)
dialog.replace = replace
dialog.set_position(gtk.WIN_POS_CENTER)
try:
dialog.set_icon_from_file(get_img_path("pyedit_sub.png"))
except:
print "Cannot load find dialog icon", sys.exc_info()
self.dialog = dialog
# Spacers
label1 = gtk.Label(" "); label2 = gtk.Label(" ")
label3 = gtk.Label(" "); label4 = gtk.Label(" ")
label5 = gtk.Label(" "); label6 = gtk.Label(" ")
label7 = gtk.Label(" "); label8 = gtk.Label(" ")
warnings.simplefilter("ignore")
entry = gtk.Entry();
myentry = entry
warnings.simplefilter("default")
entry.set_activates_default(True)
if self2.oldsearch == "":
self2.oldsearch = pedconfig.conf.sql.get_str("src")
        if self2.oldsearch is None:
self2.oldsearch = ""
if self2.oldrep == "":
self2.oldrep = pedconfig.conf.sql.get_str("rep")
        if self2.oldrep is None:
self2.oldrep = ""
# See if we have a selection for search
if self2.xsel != -1:
xssel = min(self2.xsel, self2.xsel2)
xesel = max(self2.xsel, self2.xsel2)
yssel = min(self2.ysel, self2.ysel2)
yesel = max(self2.ysel, self2.ysel2)
if yssel == yesel:
self2.oldsearch = self2.text[yssel][xssel:xesel]
entry.set_text(self2.oldsearch)
dialog.vbox.pack_start(label4)
hbox2 = gtk.HBox()
hbox2.pack_start(label6, False)
hbox2.pack_start(entry)
hbox2.pack_start(label7, False)
dialog.vbox.pack_start(hbox2)
dialog.checkbox = gtk.CheckButton("Use _regular expression")
dialog.checkbox2 = gtk.CheckButton("Case In_sensitive")
dialog.checkbox.set_active(pedconfig.conf.sql.get_int("regex"))
dialog.checkbox2.set_active(pedconfig.conf.sql.get_int("nocase"))
dialog.vbox.pack_start(label5)
hbox = gtk.HBox()
hbox.pack_start(label1); hbox.pack_start(dialog.checkbox)
hbox.pack_start(label2); hbox.pack_start(dialog.checkbox2)
hbox.pack_start(label3);
dialog.vbox.pack_start(hbox)
dialog.vbox.pack_start(label8)
label30 = gtk.Label(" "); label31 = gtk.Label(" ")
label32 = gtk.Label(" "); label33 = gtk.Label(" ")
label34 = gtk.Label(" "); label35 = gtk.Label(" ")
dialog.checkbox3 = gtk.CheckButton("Search _All Buffers")
#dialog.checkbox4 = gtk.CheckButton("Hello")
hbox4 = gtk.HBox()
hbox4.pack_start(label30); hbox4.pack_start(dialog.checkbox3)
#hbox4.pack_start(label31); hbox4.pack_start(dialog.checkbox4)
hbox4.pack_start(label32);
dialog.vbox.pack_start(hbox4)
dialog.vbox.pack_start(label33)
if replace:
dialog.repl = gtk.Entry(); dialog.repl.set_text(self2.oldrep)
dialog.repl.set_activates_default(True)
label10 = gtk.Label(" "); label11 = gtk.Label(" ")
label12 = gtk.Label(" "); label13 = gtk.Label(" ")
hbox3 = gtk.HBox()
hbox3.pack_start(label10, False)
hbox3.pack_start(dialog.repl)
hbox3.pack_start(label11, False)
dialog.vbox.pack_start(hbox3)
dialog.vbox.pack_start(label12)
dialog.connect("key-press-event", find_keypress)
dialog.show_all()
response = dialog.run()
self2.oldsearch = entry.get_text()
self.srctxt = entry.get_text()
if replace:
self.reptxt = dialog.repl.get_text()
strhist.append(self.srctxt)
dialog.destroy()
if response != gtk.RESPONSE_ACCEPT:
return
if dialog.checkbox3.get_active():
nn = self2.mained.notebook.get_n_pages();
cnt = 0; cnt2 = 0
while True:
if cnt >= nn: break
ppp = self2.mained.notebook.get_nth_page(cnt)
self.xnum = cnt * 4
find_show(self, ppp.area)
cnt += 1
else:
self.xnum = 0
find_show(self, self2)
def find_keypress(area, event):
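    # Alt+Up/Left steps back through previously used search strings,
    # Alt+Down/Right steps forward again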
global stridx, strhist, myentry
#print "find keypress", area, event
if event.type == gtk.gdk.KEY_PRESS:
if event.state & gtk.gdk.MOD1_MASK:
if event.keyval == gtk.keysyms.Up or \
event.keyval == gtk.keysyms.Left:
print "find keypress, alt UP or left key"
                if stridx < len(strhist) - 1:  # stay inside the history bounds
stridx += 1
myentry.set_text(strhist[stridx]);
if event.keyval == gtk.keysyms.Down or \
event.keyval == gtk.keysyms.Right:
print "find keypress, alt DOWN or right key"
if stridx > 0:
stridx -= 1
myentry.set_text(strhist[stridx]);
# -------------------------------------------------------------------------
def find_show(self, self2):
#print "find_show", "'" + self.srctxt + "'" + self2.fname
self.regex = None
if self.srctxt == "":
self2.mained.update_statusbar("Must specify search string")
return
if self.dialog.checkbox.get_active():
self.dialog.checkbox2.set_active(False)
try:
self.regex = re.compile(self.srctxt)
except re.error, msg:
#print sys.exc_info()
pedync.message("\n Error in regular expression: \n\n"\
" '%s' -- %s" % (self.srctxt, msg),\
None, gtk.MESSAGE_ERROR)
return
win2 = gtk.Window()
#win2.set_position(gtk.WIN_POS_CENTER)
try:
win2.set_icon_from_file(get_img_path("pyedit_sub.png"))
except:
print "Cannot load icon"
ff = os.path.basename(self2.fname)
if self.dialog.replace:
tit = "%s Search '%s' -- Replace '%s'" % \
(ff, self.srctxt, self.reptxt)
else:
tit = "%s Searching '%s'" % (ff, self.srctxt)
win2.set_title(tit)
win2.set_events(
gtk.gdk.POINTER_MOTION_MASK |
gtk.gdk.POINTER_MOTION_HINT_MASK |
gtk.gdk.BUTTON_PRESS_MASK |
gtk.gdk.BUTTON_RELEASE_MASK |
gtk.gdk.KEY_PRESS_MASK |
gtk.gdk.KEY_RELEASE_MASK |
gtk.gdk.FOCUS_CHANGE_MASK )
win2.connect("key-press-event", area_key, self)
win2.connect("key-release-event", area_key, self)
win2.connect("focus-in-event", area_focus, self, self2)
win2.connect("unmap", area_destroy, self)
oldxx = pedconfig.conf.sql.get_int("srcx")
oldyy = pedconfig.conf.sql.get_int("srcy")
oldww = pedconfig.conf.sql.get_int("srcw")
oldhh = pedconfig.conf.sql.get_int("srch")
#print "win2 oldconfig (x y w h) xnum", oldxx, oldyy, oldww, oldhh, self.xnum
if True or oldww == 0 or oldhh == 0 or oldxx == 0 or oldyy == 0:
# Position it out of the way
sxx, syy = self2.mained.window.get_position()
wxx, wyy = self2.mained.window.get_size()
myww = 3 * wxx / 8; myhh = 3 * wyy / 8
win2.set_default_size(myww, myhh)
win2.move(sxx + wxx - (myww + self.xnum), \
syy + wyy - (myhh + self.xnum))
else:
# Restore old size / location
win2.set_default_size(oldww, oldhh)
#win2.move(oldxx + self.xnum, oldyy + self.xnum)
win2.move(oldxx, oldyy)
vbox = gtk.VBox()
win2.treestore = None
win2.tree = create_tree(self, win2, self.srctxt)
win2.tree.set_headers_visible(False)
win2.tree.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
if self.dialog.replace:
butt2 = gtk.Button(" Change _One ")
butt2.connect("clicked", chg_one, self, self2, win2)
butt3 = gtk.Button(" Change _Selected ");
butt3.connect("clicked", chg_sel, self, self2, win2)
butt4 = gtk.Button(" Change _All ");
butt4.connect("clicked", chg_all, self, self2, win2)
lab1 = gtk.Label(" "); lab2 = gtk.Label(" ");
hbox4 = gtk.HBox()
hbox4.pack_start(lab1)
hbox4.pack_start(butt2, False)
hbox4.pack_start(butt3, False)
hbox4.pack_start(butt4, False)
hbox4.pack_start(lab2)
lab3 = gtk.Label(" ");
vbox.pack_start(lab3, False)
vbox.pack_start(hbox4, False)
lab4 = gtk.Label(" ");
vbox.pack_start(lab4, False)
#self.tree.connect("row-activated", tree_sel, self, self2)
win2.tree.connect("cursor-changed", tree_sel_row, self, self2)
stree = gtk.ScrolledWindow()
stree.add(win2.tree)
vbox.pack_start(stree)
win2.add(vbox)
win2.show_all()
# ---------------------------------------------------------------------
was, cnt2 = self2.search(self.srctxt, self.regex, self.dialog.checkbox2.get_active(),
self.dialog.checkbox.get_active())
update_treestore(self, win2, self2.accum, was)
self2.mained.update_statusbar("Found %d matches." % cnt2)
pedconfig.conf.sql.put("src", self.srctxt)
pedconfig.conf.sql.put("regex", self.dialog.checkbox.get_active())
pedconfig.conf.sql.put("nocase", self.dialog.checkbox2.get_active())
if self.reptxt != "":
pedconfig.conf.sql.put("rep", self.reptxt)
win2.tree.grab_focus()
def area_destroy(win2, self):
    '''# What a mess ... getting window coordinates. Futile attempts below
oldxx, oldyy = win2.get_position()
oldww, oldhh = win2.get_size()
#print "old save coord", oldxx, oldyy, oldww, oldhh
xx,yy,ww,hh = win2.get_allocation()
#print "save alloc", xx, yy, ww, hh
aa,bb,cc,dd,ee = gtk.gdk.Window.get_geometry(win2.window)
#print "save geom",aa,bb,cc,dd'''
# Finally, gdk delivers an up to date position
oldxx, oldyy = gtk.gdk.Window.get_position(win2.window)
oldww, oldhh = win2.get_size()
#print "save coord", oldxx, oldyy, oldww, oldhh
pedconfig.conf.sql.put("srcx", oldxx)
pedconfig.conf.sql.put("srcy", oldyy)
pedconfig.conf.sql.put("srcw", oldww)
pedconfig.conf.sql.put("srch", oldhh)
'''
# -------------------------------------------------------------------------
# Locate line:
def src_line2(self, self2, line, cnt):
idx = 0; idx2 = 0;
mlen = len(self.srctxt)
accum = []
while True:
if self.dialog.checkbox2.get_active():
idx = line.lower().find(self.srctxt.lower(), idx)
idx2 = idx
elif self.dialog.checkbox.get_active():
line2 = line[idx:]
#print "line2", line2
if line2 == "":
idx = -1
break
res = self.regex.search(line2)
#print res, res.start(), res.end()
if res:
idx = res.start() + idx
# Null match, ignore it ('*' with zero length match)
if res.end() == res.start():
#print "null match", idx, res.start(), res.end()
# Proceed no matter what
if res.end() != 0:
idx = res.end() + 1
else:
idx += 1
continue
idx2 = res.end() + idx
mlen = res.end() - res.start()
#print "match", line2[res.start():res.end()]
else:
idx = -1
break
else:
idx = line.find(self.srctxt, idx)
idx2 = idx
if idx >= 0:
line2 = str(idx) + ":" + str(cnt) +\
":" + str(mlen) + " " + line
#cnt2 += 1
#self2.accum.append(line2)
accum.append(line2)
idx = idx2 + 1
else:
break
return accum
'''
# -------------------------------------------------------------------------
def tree_sel_row(xtree, self, self2):
sel = xtree.get_selection()
xmodel, xiter = sel.get_selected_rows()
# In muti selection, only process first
for aa in xiter:
xstr = xmodel.get_value(xmodel.get_iter(aa), 0)
#print "Selected:", xstr
break
# Get back numbers (the C++ way)
#idx = xstr.find(":"); xxx = xstr[:idx]
#idx2 = xstr.find(":", idx+1); yyy = xstr[idx+1:idx2]
#idx3 = xstr.find(" ", idx2+1); mlen = xstr[idx2+1:idx3]
# Get back numbers the python way
try:
bb = xstr.split(" ")[0].split(":")
except:
pass
# Confirm results:
# print "TREE sel", bb
try:
self2.gotoxy(int(bb[0]), int(bb[1]), int(bb[2]), True)
except:
pass
# Focus on the current window
def area_focus(area, event, self, self2):
#print "area_focus"
nn = self2.notebook.get_n_pages();
for aa in range(nn):
vcurr = self2.notebook.get_nth_page(aa)
if vcurr.area == self2:
self2.notebook.set_current_page(aa)
self2.mained.window.set_focus(vcurr.area)
# Call key handler
def area_key(area, event, self):
#print "area_key", event
# Do key down:
if event.type == gtk.gdk.KEY_PRESS:
if event.keyval == gtk.keysyms.Escape:
#print "Esc"
area.destroy()
if event.type == gtk.gdk.KEY_PRESS:
if event.keyval == gtk.keysyms.Return:
#print "Ret"
area.destroy()
if event.keyval == gtk.keysyms.Alt_L or \
event.keyval == gtk.keysyms.Alt_R:
self.alt = True;
if event.keyval >= gtk.keysyms._1 and \
event.keyval <= gtk.keysyms._9:
print "pedwin Alt num", event.keyval - gtk.keysyms._1
if event.keyval == gtk.keysyms.x or \
event.keyval == gtk.keysyms.X:
if self.alt:
area.destroy()
elif event.type == gtk.gdk.KEY_RELEASE:
if event.keyval == gtk.keysyms.Alt_L or \
event.keyval == gtk.keysyms.Alt_R:
self.alt = False;
# Tree handlers
def start_tree(self, win2):
if not win2.treestore:
win2.treestore = gtk.TreeStore(str)
# Delete previous contents
try:
while True:
root = win2.treestore.get_iter_first()
win2.treestore.remove(root)
except:
#print sys.exc_info()
pass
piter = win2.treestore.append(None, ["Searching .."])
win2.treestore.append(piter, ["None .."])
# -------------------------------------------------------------------------
def create_tree(self, win2, match, text = None):
start_tree(self, win2)
# create the TreeView using treestore
tv = gtk.TreeView(win2.treestore)
tv.set_enable_search(True)
# create a CellRendererText to render the data
cell = gtk.CellRendererText()
# create the TreeViewColumn to display the data
#tvcolumn = gtk.TreeViewColumn("Matches for '" + match + "'")
tvcolumn = gtk.TreeViewColumn()
# add the cell to the tvcolumn and allow it to expand
tvcolumn.pack_start(cell, True)
# set the cell "text" attribute to column 0 - retrieve text
# from that column in treestore
tvcolumn.add_attribute(cell, 'text', 0)
# add tvcolumn to treeview
tv.append_column(tvcolumn)
return tv
def update_treestore(self, win2, text, was):
#print "was", was
# Delete previous contents
try:
while True:
root = win2.treestore.get_iter_first()
win2.treestore.remove(root)
except:
pass
#print sys.exc_info()
if not text:
win2.treestore.append(None, ["No Match",])
return
cnt = 0; piter2 = None; next = False
try:
for line in text:
piter = win2.treestore.append(None, [cut_lead_space(line)])
if next:
next = False; piter2 = piter
if cnt == was:
next = True
cnt += 1
except:
pass
#print sys.exc_info()
if piter2:
win2.tree.set_cursor(win2.treestore.get_path(piter2))
else:
root = win2.treestore.get_iter_first()
win2.tree.set_cursor(win2.treestore.get_path(root))
# -------------------------------------------------------------------------
# Change current item in the list
def chg_one(butt, self, self2, win2, iter = None):
single = (iter == None)
sel = win2.tree.get_selection()
xmodel, xiter = sel.get_selected_rows()
# Iter from wrappers?
if iter:
sel.select_path(xmodel.get_path(iter))
else:
# In muti selection, only process first
for aa in xiter:
iter = xmodel.get_iter(aa)
sel.select_path(xmodel.get_path(iter))
break
if iter == None:
self2.mained.update_statusbar("Nothing selected")
return
if single:
self2.undoarr.append((0, 0, NOOP, ""))
xstr = xmodel.get_value(iter, 0)
bb = xstr.split(" ")[0].split(":")
#print "ch_one", bb
self2.gotoxy(int(bb[0]), int(bb[1]), int(bb[2]), True)
self.cut(self2, True, False)
self.clip_cb(None, self.reptxt, self2, False)
newstr = self2.text[int(bb[1])]
#print "newstr", newstr
sel.unselect_all()
if single:
# Re-read list. Dirty hack, but makes it cleaner
was, cnt2 = self2.search(self.srctxt, self.regex, self.dialog.checkbox2.get_active(),
self.dialog.checkbox.get_active())
update_treestore(self, win2, self2.accum, was-1)
    return  # nothing meaningful to return; 'next' was never set in this function
# -------------------------------------------------------------------------
def chg_all(butt, self, self2, win2):
win2.tree.get_selection().select_all()
chg_sel(butt, self, self2, win2)
def chg_sel(butt, self, self2, win2):
iters = []; cnt2 = 0
sel = win2.tree.get_selection()
xmodel, xiter = sel.get_selected_rows()
# Create a list of changes
for aa in xiter:
iter = xmodel.get_iter(aa)
iters.append(iter)
sel.unselect_all()
self2.undoarr.append((0, 0, NOOP, ""))
# Change in reverse order, so we do not create gaps
iters.reverse()
for iter in iters:
chg_one(butt, self, self2, win2, iter)
if cnt2 % 10 == 0:
usleep(1)
cnt2 += 1
self2.mained.update_statusbar("Changed %d items" % cnt2)
win2.destroy()
def wclose(butt,self):
#print "xclose"
pass
def wprev(butt,self):
#print "wprev"
pass
def wnext(butt,self):
#print "wnext"
pass
| [
"[email protected]"
] | |
34d3bd0138a6b1ff374de550600bd1e994e01e20 | 15fb62305a2fa0146cc84b289642cc01a8407aab | /Python/230-KthSmallestElementInBST.py | e1fce7116f90027161bad3e1f527125aaa0290c7 | [] | no_license | geniousisme/leetCode | ec9bc91864cbe7520b085bdab0db67539d3627bd | 6e12d67e4ab2d197d588b65c1ddb1f9c52a7e047 | refs/heads/master | 2016-09-09T23:34:03.522079 | 2015-09-23T16:15:05 | 2015-09-23T16:15:05 | 32,052,408 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | # Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param {TreeNode} root
# @param {integer} k
# @return {integer}
def recurKthSmallest(self, root, k):
res = []
        self.inorderTraversal(root, res)  # traverse the whole tree so res holds all values in sorted order
return res[k - 1]
def inorderTraversal(self, root, res):
if root:
self.inorderTraversal(root.left, res)
res.append(root.val)
self.inorderTraversal(root.right, res)
return
def kthSmallest(self, root, k):
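        # iterative in-order traversal: push the whole left spine first,
        # then pop nodes in ascending order until the k-th one is reached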
stack = []
node = root
while node:
stack.append(node)
node = node.left
count = 1
while stack and count <= k:
node = stack.pop()
count += 1
right = node.right
while right:
stack.append(right)
right = right.left
return node.val
if __name__ == '__main__':
s = Solution()
test = TreeNode(4)
test.left = TreeNode(2)
test.right = TreeNode(6)
test.left.left = TreeNode(1)
test.left.right = TreeNode(3)
test.right.left = TreeNode(5)
test.right.right = TreeNode(7)
# print s.countTreeNum(test)
s.kthSmallest(test, 1)
| [
"[email protected]"
] | |
755d6aaeee71030185d492833dfef66356bbd803 | d293b1b5037f7e493eddbe8572cc03ffd9f78890 | /code/sort.py | 574a24cd3b45ffb3a643147fc331bc61787fd95d | [] | no_license | weilaidb/pyqt5 | 49f587e6ec3b74f6b27f070cd007a6946a26820a | 0ad65ed435ecfc87ca32e392bbf67973b4b13e68 | refs/heads/master | 2020-03-07T23:04:51.365624 | 2018-08-01T15:30:44 | 2018-08-01T15:30:44 | 127,771,719 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
print(sorted([36, 5, -12, 9, -21]))
print(sorted([36, 5, -12, 9, -21], key=abs))
print(sorted(['bob', 'about', 'Zoo', 'Credit']))
print(sorted(['bob', 'about', 'Zoo', 'Credit'], key=str.lower))
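# An additional sketch (not part of the original demo): sorting tuples by one field.
from operator import itemgetter
print(sorted([('Bob', 75), ('Adam', 92), ('Lisa', 88)], key=itemgetter(1), reverse=True))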
print(sorted(['bob', 'about', 'Zoo', 'Credit'], key=str.lower, reverse=True)) | [
"[email protected]"
] | |
d49d9eb6c8717ccde8a3a9fb5629628f7f989129 | ecf0d106831b9e08578845674a457a166b6e0a14 | /OOP/inheritance_EXERCISE/players_and_monsters/project/blade_knight.py | f3fe7f02553cff2f5de2b7045840784a828df529 | [] | no_license | ivo-bass/SoftUni-Solutions | 015dad72cff917bb74caeeed5e23b4c5fdeeca75 | 75612d4bdb6f41b749e88f8d9c512d0e00712011 | refs/heads/master | 2023-05-09T23:21:40.922503 | 2021-05-27T19:42:03 | 2021-05-27T19:42:03 | 311,329,921 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | from project.dark_knight import DarkKnight
class BladeKnight(DarkKnight):
def __init__(self, username: str, level: int):
super().__init__(username, level)
| [
"[email protected]"
] | |
ae7b77baced08878c051998a52c52e932ab8b76b | 1aebc65860ed3e45390c83f57d4345a2fd0c1b0b | /tests/unit/customizations/test_assumerole.py | ad6fdddbaa34bc044f11a3f63b1955468040bc6c | [
"Apache-2.0"
] | permissive | andhit-r/aws-cli | a1f572eb1328e4c5826d94d17a46e1e74399a3c8 | d1737a6031b104efb814e7446fe98640135afadd | refs/heads/develop | 2021-01-24T14:18:37.612517 | 2015-10-23T20:18:54 | 2015-10-23T20:18:54 | 44,942,247 | 0 | 1 | null | 2015-10-26T02:56:02 | 2015-10-26T02:56:02 | null | UTF-8 | Python | false | false | 16,206 | py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import shutil
import tempfile
import os
import platform
from datetime import datetime, timedelta
import mock
from botocore.hooks import HierarchicalEmitter
from botocore.exceptions import PartialCredentialsError
from dateutil.tz import tzlocal
from awscli.testutils import unittest
from awscli.customizations import assumerole
class TestAssumeRolePlugin(unittest.TestCase):
def test_assume_role_provider_injected(self):
session = mock.Mock()
assumerole.inject_assume_role_provider(
session, event_name='building-command-table.foo')
session.get_component.assert_called_with('credential_provider')
credential_provider = session.get_component.return_value
call_args = credential_provider.insert_before.call_args[0]
self.assertEqual(call_args[0], 'shared-credentials-file')
self.assertIsInstance(call_args[1], assumerole.AssumeRoleProvider)
def test_assume_role_provider_registration(self):
event_handlers = HierarchicalEmitter()
assumerole.register_assume_role_provider(event_handlers)
session = mock.Mock()
event_handlers.emit('session-initialized', session=session)
# Just verifying that anything on the session was called ensures
# that our handler was called, as it's the only thing that should
# be registered.
session.get_component.assert_called_with('credential_provider')
def test_provider_not_registered_on_error(self):
session = mock.Mock()
session.get_component.side_effect = Exception(
"Couldn't get credential_provider.")
assumerole.inject_assume_role_provider(
session, event_name='building-command-table.foo')
self.assertFalse(
session.get_component.return_value.insert_before.called)
class TestAssumeRoleCredentialProvider(unittest.TestCase):
maxDiff = None
def setUp(self):
self.fake_config = {
'profiles': {
'development': {
'role_arn': 'myrole',
'source_profile': 'longterm',
},
'longterm': {
'aws_access_key_id': 'akid',
'aws_secret_access_key': 'skid',
}
}
}
def create_config_loader(self, with_config=None):
if with_config is None:
with_config = self.fake_config
load_config = mock.Mock()
load_config.return_value = with_config
return load_config
def create_client_creator(self, with_response):
# Create a mock sts client that returns a specific response
# for assume_role.
client = mock.Mock()
client.assume_role.return_value = with_response
return mock.Mock(return_value=client)
def test_assume_role_with_no_cache(self):
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': datetime.now(tzlocal()).isoformat()
},
}
client_creator = self.create_client_creator(with_response=response)
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
client_creator, cache={}, profile_name='development')
credentials = provider.load()
self.assertEqual(credentials.access_key, 'foo')
self.assertEqual(credentials.secret_key, 'bar')
self.assertEqual(credentials.token, 'baz')
def test_assume_role_retrieves_from_cache(self):
date_in_future = datetime.utcnow() + timedelta(seconds=1000)
utc_timestamp = date_in_future.isoformat() + 'Z'
self.fake_config['profiles']['development']['role_arn'] = 'myrole'
cache = {
'development--myrole': {
'Credentials': {
'AccessKeyId': 'foo-cached',
'SecretAccessKey': 'bar-cached',
'SessionToken': 'baz-cached',
'Expiration': utc_timestamp,
}
}
}
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(), mock.Mock(),
cache=cache, profile_name='development')
credentials = provider.load()
self.assertEqual(credentials.access_key, 'foo-cached')
self.assertEqual(credentials.secret_key, 'bar-cached')
self.assertEqual(credentials.token, 'baz-cached')
def test_cache_key_is_windows_safe(self):
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': datetime.now(tzlocal()).isoformat()
},
}
cache = {}
self.fake_config['profiles']['development']['role_arn'] = (
'arn:aws:iam::foo-role')
client_creator = self.create_client_creator(with_response=response)
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
client_creator, cache=cache, profile_name='development')
provider.load()
        # On Windows, you cannot use a ':' in the filename, so
# we need to do some small transformations on the filename
# to replace any ':' that come up.
self.assertEqual(cache['development--arn_aws_iam__foo-role'],
response)
def test_cache_key_with_role_session_name(self):
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': datetime.now(tzlocal()).isoformat()
},
}
cache = {}
self.fake_config['profiles']['development']['role_arn'] = (
'arn:aws:iam::foo-role')
self.fake_config['profiles']['development']['role_session_name'] = (
'foo_role_session_name')
client_creator = self.create_client_creator(with_response=response)
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
client_creator, cache=cache, profile_name='development')
provider.load()
self.assertEqual(cache['development--arn_aws_iam__foo-role--foo_role_session_name'],
response)
def test_assume_role_in_cache_but_expired(self):
expired_creds = datetime.utcnow()
utc_timestamp = expired_creds.isoformat() + 'Z'
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': utc_timestamp,
},
}
client_creator = self.create_client_creator(with_response=response)
cache = {
'development--myrole': {
'Credentials': {
'AccessKeyId': 'foo-cached',
'SecretAccessKey': 'bar-cached',
'SessionToken': 'baz-cached',
'Expiration': utc_timestamp,
}
}
}
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(), client_creator,
cache=cache, profile_name='development')
credentials = provider.load()
self.assertEqual(credentials.access_key, 'foo')
self.assertEqual(credentials.secret_key, 'bar')
self.assertEqual(credentials.token, 'baz')
def test_role_session_name_provided(self):
self.fake_config['profiles']['development']['role_session_name'] = 'myname'
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': datetime.now(tzlocal()).isoformat(),
},
}
client_creator = self.create_client_creator(with_response=response)
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
client_creator, cache={}, profile_name='development')
provider.load()
client = client_creator.return_value
client.assume_role.assert_called_with(
RoleArn='myrole', RoleSessionName='myname')
def test_external_id_provided(self):
self.fake_config['profiles']['development']['external_id'] = 'myid'
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': datetime.now(tzlocal()).isoformat(),
},
}
client_creator = self.create_client_creator(with_response=response)
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
client_creator, cache={}, profile_name='development')
provider.load()
client = client_creator.return_value
client.assume_role.assert_called_with(
RoleArn='myrole', ExternalId='myid', RoleSessionName=mock.ANY)
def test_assume_role_with_mfa(self):
self.fake_config['profiles']['development']['mfa_serial'] = 'mfa'
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': datetime.now(tzlocal()).isoformat(),
},
}
client_creator = self.create_client_creator(with_response=response)
prompter = mock.Mock(return_value='token-code')
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(), client_creator,
cache={}, profile_name='development', prompter=prompter)
provider.load()
client = client_creator.return_value
# In addition to the normal assume role args, we should also
# inject the serial number from the config as well as the
# token code that comes from prompting the user (the prompter
# object).
client.assume_role.assert_called_with(
RoleArn='myrole', RoleSessionName=mock.ANY, SerialNumber='mfa',
TokenCode='token-code')
def test_assume_role_mfa_cannot_refresh_credentials(self):
# Note: we should look into supporting optional behavior
# in the future that allows for reprompting for credentials.
# But for now, if we get temp creds with MFA then when those
# creds expire, we can't refresh the credentials.
self.fake_config['profiles']['development']['mfa_serial'] = 'mfa'
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
# We're creating an expiry time in the past so as
# soon as we try to access the credentials, the
# refresh behavior will be triggered.
'Expiration': (
datetime.now(tzlocal()) -
timedelta(seconds=100)).isoformat(),
},
}
client_creator = self.create_client_creator(with_response=response)
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(), client_creator,
cache={}, profile_name='development',
prompter=mock.Mock(return_value='token-code'))
creds = provider.load()
with self.assertRaises(assumerole.RefreshWithMFAUnsupportedError):
# access_key is a property that will refresh credentials
# if they're expired. Because we set the expiry time to
# something in the past, this will trigger the refresh
# behavior, with with MFA will currently raise an exception.
creds.access_key
def test_no_config_is_noop(self):
self.fake_config['profiles']['development'] = {
'aws_access_key_id': 'foo',
'aws_secret_access_key': 'bar',
}
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
mock.Mock(), cache={}, profile_name='development')
# Because a role_arn was not specified, the AssumeRoleProvider
# is a noop and will not return credentials (which means we
# move on to the next provider).
credentials = provider.load()
self.assertIsNone(credentials)
def test_source_profile_not_provided(self):
del self.fake_config['profiles']['development']['source_profile']
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
mock.Mock(), cache={}, profile_name='development')
        # source_profile is required, we should get an error.
with self.assertRaises(PartialCredentialsError):
provider.load()
def test_source_profile_does_not_exist(self):
dev_profile = self.fake_config['profiles']['development']
dev_profile['source_profile'] = 'does-not-exist'
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
mock.Mock(), cache={}, profile_name='development')
        # source_profile is required, we should get an error.
with self.assertRaises(assumerole.InvalidConfigError):
provider.load()
class TestJSONCache(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.cache = assumerole.JSONFileCache(self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_supports_contains_check(self):
# By default the cache is empty because we're
# using a new temp dir everytime.
self.assertTrue('mykey' not in self.cache)
def test_add_key_and_contains_check(self):
self.cache['mykey'] = {'foo': 'bar'}
self.assertTrue('mykey' in self.cache)
def test_added_key_can_be_retrieved(self):
self.cache['mykey'] = {'foo': 'bar'}
self.assertEqual(self.cache['mykey'], {'foo': 'bar'})
def test_only_accepts_json_serializable_data(self):
with self.assertRaises(ValueError):
            # a set() cannot be serialized to a JSON string.
self.cache['mykey'] = set()
def test_can_override_existing_values(self):
self.cache['mykey'] = {'foo': 'bar'}
self.cache['mykey'] = {'baz': 'newvalue'}
self.assertEqual(self.cache['mykey'], {'baz': 'newvalue'})
def test_can_add_multiple_keys(self):
self.cache['mykey'] = {'foo': 'bar'}
self.cache['mykey2'] = {'baz': 'qux'}
self.assertEqual(self.cache['mykey'], {'foo': 'bar'})
self.assertEqual(self.cache['mykey2'], {'baz': 'qux'})
def test_working_dir_does_not_exist(self):
working_dir = os.path.join(self.tempdir, 'foo')
cache = assumerole.JSONFileCache(working_dir)
cache['foo'] = {'bar': 'baz'}
self.assertEqual(cache['foo'], {'bar': 'baz'})
def test_key_error_raised_when_cache_key_does_not_exist(self):
with self.assertRaises(KeyError):
self.cache['foo']
@unittest.skipIf(platform.system() not in ['Darwin', 'Linux'],
'File permissions tests not supported on Windows.')
def test_permissions_for_file_restricted(self):
self.cache['mykey'] = {'foo': 'bar'}
filename = os.path.join(self.tempdir, 'mykey.json')
self.assertEqual(os.stat(filename).st_mode & 0xFFF, 0o600)
| [
"[email protected]"
] | |
d86ebf62cfba1698923c1901510e7e9f1be2816c | 955b968d46b4c436be55daf8aa1b8fc8fe402610 | /other/get_timezone.py | d32feb2a1fb16ad7235d22228884263287fa7712 | [] | no_license | han-huang/python_selenium | 1c8159fd1421b1f0e87cb0df20ae4fe82450f879 | 56f9f5e5687cf533c678a1c12e1ecaa4c50a7795 | refs/heads/master | 2020-03-09T02:24:48.882279 | 2018-04-07T15:06:18 | 2018-04-07T15:06:18 | 128,535,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,387 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from time import gmtime, strftime, localtime
import locale, time, sys
# Windows 10 platform
tz1 = strftime("%z", gmtime())
print('gmtime()', gmtime())
print(tz1) # +0800
tz2 = strftime("%z", localtime())
print('localtime()', localtime())
print(tz2) # +0800
print()
tz3 = strftime("%Z", localtime())
print(tz3)
# https://segmentfault.com/a/1190000007598639
# When the Python REPL is started from a shell, the interpreter's default
# locale is 'C', i.e. no localisation is configured; locale.getdefaultlocale()
# shows the shell environment's locale setting, and locale.setlocale(locale.LC_ALL, '')
# switches the interpreter's locale to the shell's.
print('locale.getlocale()', locale.getlocale())
print('locale.getdefaultlocale()', locale.getdefaultlocale())
locale.setlocale(locale.LC_ALL, '')
print('locale.getlocale()', locale.getlocale())
tz4 = strftime("%Z", localtime())
print(tz4) # 台北標準時間
print()
# http://bbs.fishc.com/thread-76584-1-1.html
# >>> [bytes([ord(c) for c in s]).decode('gbk') for s in time.tzname ]
# ['中国标准时间', '中国标准时间']
tz5 = time.tzname
print(tz5)
tz6 = [bytes([ord(c) for c in s]).decode('big5') for s in time.tzname]
print(tz6) # ['台北標準時間', '台北日光節約時間']
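# Why the ord()/decode trick above can work (an assumption, not verified here):
# time.tzname appears to carry one mis-decoded byte per character, so
# collecting ord(c) recovers the raw ANSI-codepage bytes, which the right
# codepage (big5 here, gbk in the linked thread) then decodes properly.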
print()
print('sys.stdout.encoding', sys.stdout.encoding) # sys.stdout.encoding utf-8
print('sys.getdefaultencoding()', sys.getdefaultencoding()) # sys.stdout.encoding utf-8
print()
# https://docs.python.org/3/library/functions.html#ord
# ord(c)
# Given a string representing one Unicode character, return an integer representing the Unicode code point of that character.
# For example, ord('a') returns the integer 97 and ord('€') (Euro sign) returns 8364. This is the inverse of chr().
print(ord('a')) # 97
print(chr(97)) # a
print()
print(ord('€')) # 8364
print(chr(8364)) # €
print()
# https://r12a.github.io/app-conversion/
print(ord('\u6642')) # 26178
print(ord('時')) # 26178
print(chr(26178)) # 時
data_utf8 = b'\xE6\x99\x82' # https://r12a.github.io/app-conversion/ 時 UTF-8 code units E6 99 82
print("data_utf8.decode('utf_8')",data_utf8.decode('utf_8')) # data_utf8.decode('utf_8') 時
print("data_utf8.decode('utf_8').encode('utf_8')",data_utf8.decode('utf_8').encode('utf_8')) # data_utf8.decode('utf_8').encode('utf_8') b'\xe6\x99\x82'
| [
"vagrant@LaravelDemoSite"
] | vagrant@LaravelDemoSite |
61144733a7ba970967c47e9af7a46cadf1f7c2db | 3be8b5d0334de1f3521dd5dfd8a58704fb8347f9 | /create_session_index.py | dc01fc1ffa77f8416331b23701799095cea70a20 | [
"MIT"
] | permissive | bmillham/djrq2 | 21a8cbc3087d7ad46087cd816892883cd276db7d | 5f357b3951600a9aecbe6c50727891b1485df210 | refs/heads/master | 2023-07-07T01:07:35.093669 | 2023-06-26T05:21:33 | 2023-06-26T05:21:33 | 72,969,773 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | """ Run this one time, to setup the automatic expiration of sessions """
from web.app.djrq.model.session import Session
from pymongo import MongoClient
collection = MongoClient().djrq2.sessions
Session._expires.create_index(collection)
| [
"[email protected]"
] | |
0f5084fe7deefe3d514592c7c60d72832e33f11f | 066612a390af03bb170d74a21b0fb0b7bcbfe524 | /tests/testcases.py | 12602c0da2e5760fc9d92fd91d9c22da1a2732ca | [
"MIT"
] | permissive | kikeh/django-graphql-extensions | 873577cc9e0085630889fea9fa5539962c31dbcc | 951b295235ca68270066cc70148e2ae937d4eb56 | refs/heads/master | 2020-12-26T05:23:12.039826 | 2020-01-31T09:41:43 | 2020-01-31T09:41:43 | 237,398,866 | 0 | 0 | MIT | 2020-01-31T09:32:25 | 2020-01-31T09:32:24 | null | UTF-8 | Python | false | false | 685 | py | from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.test import testcases
from graphql_extensions import testcases as extensions_testcases
from . import schema
class TestCase(testcases.TestCase):
def setUp(self):
self.group = Group.objects.create(name='flavors')
self.user = get_user_model().objects.create_user(
username='test',
password='dolphins')
class SchemaTestCase(TestCase, extensions_testcases.SchemaTestCase):
Query = schema.Query
Mutations = None
def setUp(self):
super().setUp()
self.client.schema(query=self.Query, mutation=self.Mutations)
| [
"[email protected]"
] | |
273fd9f145e716e3be92436742505363a1f97b3e | a6e812e138640e63ccf25bc795d08cea584031e8 | /Codeforces/381/A.py | a56be37e923308335c6c7e716b4f91442338b30a | [] | no_license | abhigupta4/Competitive-Coding | b80de4cb5d5cf0cf14266b2d05f9434348f51a9e | 5ec0209f62a7ee38cb394d1a00dc8f2582ff09be | refs/heads/master | 2021-01-17T06:54:12.707692 | 2020-12-10T18:00:17 | 2020-12-10T18:00:17 | 49,972,984 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | def ii():
return map(int,raw_input().split())
n,a,b,c = ii()
if n%4 == 0:
print 0
elif n%4 == 1:
print min(c,3*a,b+a)
elif n%4 == 2:
print min(2*a,b,2*c)
else:
print min(a,b+c,3*c) | [
"[email protected]"
] | |
9bb1fba15b061678ffcff2219bc7888f2a3681bd | 963a49b8a48764929169edb57e492b5021331c87 | /tests/volume_system_test.py | 5d4b9236f96e02ee546f514160efbb9699654d30 | [
"MIT"
] | permissive | ralphje/imagemounter | d0e73b72fe354c23607361db26906e1976b5d8ee | 383b30b17fe24df64ccef071ffb5443abf203368 | refs/heads/master | 2023-02-22T14:27:54.279724 | 2022-04-04T15:08:56 | 2022-04-04T15:08:56 | 16,476,185 | 98 | 51 | MIT | 2023-02-09T10:49:23 | 2014-02-03T10:27:32 | Python | UTF-8 | Python | false | false | 869 | py | import sys
from imagemounter._util import check_output_
from imagemounter.disk import Disk
from imagemounter.parser import ImageParser
class TestParted:
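    # regression test: volume detection must not hang when 'parted' prompts
    # on stdin; the patched command below spawns a child process that really
    # asks for input to simulate that situation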
def test_parted_requests_input(self, mocker):
check_output = mocker.patch("imagemounter.volume_system._util.check_output_")
def modified_command(cmd, *args, **kwargs):
if cmd[0] == 'parted':
# A command that requests user input
return check_output_([sys.executable, "-c", "exec(\"try: input('>> ')\\nexcept: pass\")"],
*args, **kwargs)
return mocker.DEFAULT
check_output.side_effect = modified_command
disk = Disk(ImageParser(), path="...")
list(disk.volumes.detect_volumes(method='parted'))
check_output.assert_called()
# TODO: kill process when test fails
| [
"[email protected]"
] | |
e77a593197bf45d0a7c18466dd8753c90f6313e4 | 6e701e3cff75a12a6f7f591fab440e62c2ecb198 | /bookmarks/settings/base.py | 1e1ddc46ec07903323915f87ddcaa7b1e3b5622a | [] | no_license | WellingtonIdeao/d3ex_bookmarks | 10350f54c396881bc9e81b96cb86d3f6d74c8b34 | 52c1a5493202dc415f00c4d63b64e9533ac1845d | refs/heads/main | 2023-06-07T02:40:34.806602 | 2021-07-06T00:30:28 | 2021-07-06T00:30:28 | 381,181,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,172 | py | """
Django settings for bookmarks project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os.path
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
# Application definition
INSTALLED_APPS = [
'account.apps.AccountConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'images.apps.ImagesConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookmarks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bookmarks.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Recife'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGIN_URL = 'account:login'
LOGIN_REDIRECT_URL = 'account:dashboard'
LOGOUT_URL = 'account:logout'
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'account.authentication.EmailAuthBackend',
'social_core.backends.facebook.FacebookOAuth2',
]
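# Sketch for wiring the Facebook backend (setting names from
# social-auth-app-django; the env-var names are assumptions and real values
# belong in the environment-specific settings modules, not in base):
# SOCIAL_AUTH_FACEBOOK_KEY = os.environ.get('FACEBOOK_APP_ID')
# SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get('FACEBOOK_APP_SECRET')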
| [
"[email protected]"
] | |
ec27a29626174e37914d48b357b7324a2522650f | b0c88350eadcbbeab3d9ebb5ea3a2efccc7d95e9 | /classicalml/regression_targetscaling/cveval_regression_robseqprior.py | a8aea8746a7797b9994c97aa645a788a3e9514d0 | [] | no_license | ysuter/brats20-survivalprediction | 853d821c61b91e35f7bf23a1ca1f7295b1ef3ee1 | 388053b789ccf388bc9a29bf94a672d2da973a5c | refs/heads/main | 2023-04-16T15:48:35.486293 | 2021-04-30T17:11:24 | 2021-04-30T17:11:24 | 316,608,244 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,515 | py | #!/home/yannick/anaconda3/envs/py36/bin/python
import json
import numpy as np
import os
import pandas as pd
from skfeature.function.information_theoretical_based import CIFE, JMI, DISR, MIM, CMIM, ICAP, MRMR, MIFS
from skfeature.function.similarity_based import reliefF, fisher_score
from skfeature.function.statistical_based import chi_square, gini_index
from sklearn.ensemble.forest import RandomForestRegressor, ExtraTreesRegressor
from sklearn.ensemble.bagging import BaggingRegressor
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble.weight_boosting import AdaBoostRegressor
from sklearn.gaussian_process.gpr import GaussianProcessRegressor
from sklearn.isotonic import IsotonicRegression
from sklearn.linear_model.bayes import ARDRegression
from sklearn.linear_model.huber import HuberRegressor
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.passive_aggressive import PassiveAggressiveRegressor
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.linear_model.theil_sen import TheilSenRegressor
from sklearn.linear_model.ransac import RANSACRegressor
from sklearn.neighbors.regression import KNeighborsRegressor
from sklearn.neighbors.regression import RadiusNeighborsRegressor
from sklearn.neural_network.multilayer_perceptron import MLPRegressor
from sklearn.tree.tree import DecisionTreeRegressor, ExtraTreeRegressor
from sklearn.svm.classes import SVR
from sklearn.metrics import accuracy_score, balanced_accuracy_score, mean_squared_error, r2_score
from scipy.stats import spearmanr # spearmanr(currttpdata_bratumia["TTP"].values, currttpdata_bratumia["OS"].values, nan_policy='omit')
from sklearn.preprocessing import QuantileTransformer
from tqdm import tqdm
def survival_classencoding(survarr: np.array, classboundaries: list):
    if len(classboundaries) == 1:
        survival_classes = [0 if elem <= classboundaries[0] else 1 for elem in survarr]
    elif len(classboundaries) == 2:
        survival_classes = [0 if elem <= classboundaries[0] else 1 if elem <= classboundaries[1] else 2
                            for elem in survarr]
    else:
        raise ValueError("only one or two class boundaries are supported")
    return np.array(survival_classes)
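# e.g. survival_classencoding([100, 400, 500], [304.2, 456.25]) -> array([0, 1, 2]):
# short survivors fall below the first boundary, mid-term between the two,
# long-term above the second (the values here are illustrative).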
def writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho):
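    # append one cross-validation result row and checkpoint the whole table
    # to disk right away, so partial results survive an interrupted run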
curr_resultsdict = {"Feature selector": sel_name,
"ML method": clf_name,
"Split": split,
"Parameter1": param1,
"Parameter2": param2,
"Accuracy": acc,
"Balanced Accuracy": balacc,
"MSE": mse,
"r2": r2,
"spearmanr": rho
}
outdf = outdf.append(curr_resultsdict, ignore_index=True)
print(outdf)
outdf.to_csv(
"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/featsel_outputs/cvresults_robseqprior_regression2qtfm.csv",
index=False)
return outdf
def gradeoutput(y_test, y_pred, class_boundary, tfm):
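    # map the quantile-transformed targets/predictions back to survival days
    # first, so the metrics and class labels are computed on the original scale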
y_test = np.squeeze(tfm.inverse_transform(y_test.reshape(-1, 1)))
y_pred = np.squeeze(tfm.inverse_transform(y_pred.reshape(-1, 1)))
y_test_classes = survival_classencoding(y_test, class_boundary)
y_pred_classes = survival_classencoding(y_pred, class_boundary)
acc = accuracy_score(y_test_classes, y_pred_classes)
balacc = balanced_accuracy_score(y_test_classes, y_pred_classes)
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
rho, _ = spearmanr(y_test, y_pred, nan_policy='omit')
return [balacc, acc, mse, r2, rho]
class_boundary = [304.2, 456.25]
# features = pd.read_csv("/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/featsel_outputs/training_scaledfeat.csv", index_col="ID")
features = pd.read_csv("/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/featsel_outputs/training_scaledfeat2.csv", index_col="ID")
splitinfopath = "/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/featsel_outputs/splitinfo.json"
robustnessinfo = pd.read_csv("/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/MANAGE/code/MANAGE/brats2020/featlists/robust_icc85.csv", header=None).values
robustnessinfo = [elem[0] for elem in robustnessinfo]
features_nosurv = features.drop(columns="Survival_days", inplace=False)
surv_days = features["Survival_days"]
surv_classes = survival_classencoding(surv_days, class_boundary)
# only use features from the T1c and FLAIR MRIs
colselect = [elem for elem in features_nosurv.columns if (('T1c' in elem) or ("FLAIR" in elem) or ("Age" in elem) or ("z_mincet" in elem) or ("cet_ventrdist" in elem))]
dropcols = list(set(features_nosurv.columns) - set(colselect))
features_nosurv.drop(columns=dropcols, inplace=True)
colselect = [elem for elem in features_nosurv.columns if np.any([x in elem for x in robustnessinfo])]
dropcols = list(set(features_nosurv.columns) - set(colselect))
features_nosurv.drop(columns=dropcols, inplace=True)
np.random.seed(42)
# load split infos
with open(splitinfopath) as f:
kfolds = json.load(f)
numfeat = 9
randomstate = 42
classifiernames = ["Random Forest",
"Extra Trees",
# "Hist. Gradient Boosting",
"AdaBoost",
"Gaussian Process",
"ARD",
# "Huber",
"Linear",
"Passive Aggressive",
"SGD",
"Theil-Sen",
"RANSAC",
"K-Neighbors",
"Radius Neighbors",
"MLP",
"Decision Tree",
"Extra Tree",
"SVR"
]
classifiers = [
RandomForestRegressor(n_estimators=200, n_jobs=5, random_state=randomstate),
ExtraTreesRegressor(n_estimators=200, n_jobs=5, random_state=randomstate),
# GradientBoostingRegressor(random_state=randomstate), # learning_rate is a hyper-parameter in the range (0.0, 1.0]
# HistGradientBoostingClassifier(random_state=randomstate), # learning_rate is a hyper-parameter in the range (0.0, 1.0]
AdaBoostRegressor(n_estimators=200, random_state=randomstate),
GaussianProcessRegressor(normalize_y=True),
ARDRegression(),
# HuberRegressor(), # epsilon: greater than 1.0, default 1.35
LinearRegression(n_jobs=5),
PassiveAggressiveRegressor(random_state=randomstate), # C: 0.25, 0.5, 1, 5, 10
SGDRegressor(random_state=randomstate),
TheilSenRegressor(n_jobs=5, random_state=randomstate),
RANSACRegressor(random_state=randomstate),
KNeighborsRegressor(weights='distance'), # n_neighbors: 3, 6, 9, 12, 15, 20
RadiusNeighborsRegressor(weights='distance'), # radius: 1, 2, 5, 10, 15
MLPRegressor(max_iter=10000000, random_state=randomstate),
DecisionTreeRegressor(random_state=randomstate), # max_depth = 2, 3, 4, 6, 8
ExtraTreeRegressor(random_state=randomstate), # max_depth = 2, 3, 4, 6, 8
SVR() # C: 0.25, 0.5, 1, 5, 10
]
selectors = [
reliefF.reliefF,
fisher_score.fisher_score,
# chi_square.chi_square,
JMI.jmi,
CIFE.cife,
DISR.disr,
MIM.mim,
CMIM.cmim,
ICAP.icap,
MRMR.mrmr,
MIFS.mifs]
# selectornames_short = ["RELF", "FSCR", "CHSQ", "JMI", "CIFE", "DISR", "MIM", "CMIM", "ICAP", "MRMR", "MIFS"]
selectornames_short = ["RELF", "FSCR", "JMI", "CIFE", "DISR", "MIM", "CMIM", "ICAP", "MRMR", "MIFS"]
# class boundary list
class_boundary = [304.2, 456.25]
numsplits = 10
# Dataframe for highest balanced accuracy for each feature selector / ML combination
outdf = pd.DataFrame(data=[], columns=["Feature selector", "ML method", "Split", "Parameter1", "Parameter2", "Accuracy", "Balanced Accuracy", "MSE", "r2", "spearmanr"])
for split in np.arange(numsplits):
print("Evaluating fold " + str(split))
train_index = kfolds["fold_" + str(split)]["train"]
test_index = kfolds["fold_" + str(split)]["test"]
X_train, X_test = features_nosurv.iloc[train_index], features_nosurv.iloc[test_index]
y_train, y_test = surv_days[train_index], surv_days[test_index]
# scale target with a quantile transform
qtfm = QuantileTransformer(output_distribution='normal', n_quantiles=150, random_state=randomstate)
y_train = np.squeeze(qtfm.fit_transform(y_train.values.reshape(-1, 1)))
y_test = np.squeeze(qtfm.transform(y_test.values.reshape(-1, 1)))
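    # gradeoutput is assumed to use qtfm.inverse_transform internally so that
    # MSE and the class-based accuracies are reported on the original
    # survival-days scale rather than the transformed one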
# y_train, y_test = surv_classes[train_index], surv_classes[test_index]
# for every split, perform feature selection
for sel_name, sel in zip(selectornames_short, selectors):
print('#####')
print(sel_name)
print('#####')
        if sel_name == "CHSQ":
# shift X values to be non-negative for chsq feature selection
X_train_tmp = X_train + np.abs(X_train.min())
selscore = sel(X_train_tmp, y_train)
selidx = np.argsort(selscore)[::-1]
selidx = selidx[0:numfeat]
selscore = selscore[selidx]
selscoredf = pd.DataFrame(
data=np.transpose(np.vstack((X_train.columns[selidx].values, selscore))),
columns=['Feature', 'Score'])
elif sel_name == "RELF":
selscore = sel(X_train.values, y_train, k=numfeat)
selidx = np.argsort(selscore)[::-1]
# print(selidx)
selidx = selidx[0:numfeat]
selscoredf = pd.DataFrame(
data=np.transpose(np.vstack((X_train.columns[selidx].values, selscore[selidx]))),
columns=['Feature', 'Score'])
elif sel_name == "JMI" or sel_name == "CIFE" or sel_name == "DISR" or sel_name == "MIM" \
or sel_name == "CMIM" or sel_name == "ICAP" or sel_name == "MRMR" or sel_name == "MIFS":
selidx, selscore, _ = sel(X_train.values, y_train, n_selected_features=numfeat)
selscoredf = pd.DataFrame(
data=np.transpose(np.vstack((X_train.columns[selidx].values, selscore))),
columns=['Feature', 'Score'])
else:
selscore = sel(X_train.values, y_train)
selidx = np.argsort(selscore)[::-1]
# print(selidx)
selidx = selidx[0:numfeat]
selscoredf = pd.DataFrame(
data=np.transpose(np.vstack((X_train.columns[selidx].values, selscore[selidx]))),
columns=['Feature', 'Score'])
# get subsets for all number of features
X_train_selected = X_train.iloc[:, selidx[0:numfeat]]
X_test_selected = X_test.iloc[:, selidx[0:numfeat]]
##########################################
# do classification with all classifiers #
##########################################
best_param1 = np.NaN
best_param2 = np.NaN
best_balacc = np.NaN
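        # Each branch below sweeps one hyper-parameter of the current regressor
        # and logs every configuration to outdf; regressors without a grid fall
        # through to the final else branch and are fit with default settings.
        # (The best_* placeholders above are not updated in this version.)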
for clf_name, clf in zip(classifiernames, classifiers):
print(clf_name)
            if clf_name == "Passive Aggressive":
param1 = np.NaN
param2 = np.NaN
C = [0.25, 0.5, 1, 5, 10]
for param1 in tqdm(C):
clf = PassiveAggressiveRegressor(C=param1, random_state=randomstate)
clf.fit(X_train_selected, y_train)
y_pred = clf.predict(X_test_selected)
y_train_pred = clf.predict(X_train_selected)
balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary, qtfm)
outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)
elif clf_name is "SVR":
param1 = np.NaN
param2 = np.NaN
C = [0.25, 0.5, 1, 5, 10]
for param1 in tqdm(C):
clf = SVR(C=param1)
clf.fit(X_train_selected, y_train)
y_pred = clf.predict(X_test_selected)
y_train_pred = clf.predict(X_train_selected)
balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary, qtfm)
outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)
elif clf_name is "Decision Tree":
param1 = np.NaN
param2 = np.NaN
max_depthlist = [2, 3, 4, 6, 8]
for param1 in tqdm(max_depthlist):
clf = DecisionTreeRegressor(max_depth=param1, random_state=randomstate)
clf.fit(X_train_selected, y_train)
y_pred = clf.predict(X_test_selected)
y_train_pred = clf.predict(X_train_selected)
balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary, qtfm)
outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)
elif clf_name is "Extra Tree":
param1 = np.NaN
param2 = np.NaN
max_depthlist = [2, 3, 4, 6, 8]
for param1 in tqdm(max_depthlist):
clf = ExtraTreeRegressor(max_depth=param1, random_state=randomstate)
clf.fit(X_train_selected, y_train)
y_pred = clf.predict(X_test_selected)
y_train_pred = clf.predict(X_train_selected)
balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary, qtfm)
outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)
# elif clf_name is "Hist. Gradient Boosting":
# param1 = np.NaN
# param2 = np.NaN
# lr_list = [0.1, 0.3, 0.6, 0.9]
# for param1 in tqdm(lr_list):
# clf = HistGradientBoostingClassifier(learning_rate=param1, random_state=randomstate)
#
# clf.fit(X_train_selected, y_train)
#
# y_pred = clf.predict(X_test_selected)
# y_train_pred = clf.predict(X_train_selected)
#
# balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary, tfm)
# outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)
elif clf_name is "Huber":
param1 = np.NaN
param2 = np.NaN
eps_list = [1.1, 1.2, 1.35, 1.5, 2] # epsilon: greater than 1.0, default 1.35
for param1 in tqdm(eps_list):
                    clf = HuberRegressor(epsilon=param1)
clf.fit(X_train_selected, y_train)
y_pred = clf.predict(X_test_selected)
y_train_pred = clf.predict(X_train_selected)
balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary, qtfm)
outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)
elif clf_name is "K-Neighbors":
param1 = np.NaN
param2 = np.NaN
                neighbors_list = [3, 6, 9, 12, 15, 20]  # candidate values for n_neighbors
for param1 in tqdm(neighbors_list):
clf = KNeighborsRegressor(n_neighbors=param1, weights='distance')
clf.fit(X_train_selected, y_train)
y_pred = clf.predict(X_test_selected)
y_train_pred = clf.predict(X_train_selected)
balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary, qtfm)
outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2, rho)
elif clf_name is "Radius Neighbors":
param1 = np.NaN
param2 = np.NaN
                radius_list = [1, 2, 5, 10, 15]  # candidate values for radius
for param1 in tqdm(radius_list):
                    clf = RadiusNeighborsRegressor(radius=param1, weights='distance')
clf.fit(X_train_selected, y_train)
y_pred = clf.predict(X_test_selected)
y_train_pred = clf.predict(X_train_selected)
balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary, qtfm)
outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2,
rho)
else:
param1 = np.NaN
param2 = np.NaN
clf.fit(X_train_selected, y_train)
y_pred = clf.predict(X_test_selected)
y_train_pred = clf.predict(X_train_selected)
balacc, acc, mse, r2, rho = gradeoutput(y_test, y_pred, class_boundary, qtfm)
outdf = writeresults(outdf, sel_name, clf_name, split, param1, param2, acc, balacc, mse, r2,
rho)
outdf.to_csv("/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/featsel_outputs/cvresults_robseqprior_regression2qtfm.csv", index=False)
| [
"[email protected]"
] | |
5eacb8622f507ddfb3d83ff89f5001b3f5fa21e3 | 634367d6a94d9bce231a8c29cf9713ebfc4b1de7 | /covid_dashboard/views/get_district_stats_on_given_date/api_wrapper.py | 83bfdd8ad95f836f4c9f6a6c05a99e3b45ee01c2 | [] | no_license | saikiranravupalli/covid_dashboard | 5a48c97597983ada36a3bf131edf5ca15f1dedec | 954dd02819fb8f6776fa2828e8971bd55efa657c | refs/heads/master | 2022-11-08T10:11:27.836507 | 2020-06-30T09:00:27 | 2020-06-30T09:00:27 | 269,610,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py | import json
from django.http import HttpResponse
from django_swagger_utils.drf_server.utils.decorator.interface_decorator \
import validate_decorator
from .validator_class import ValidatorClass
from covid_dashboard.interactors\
.get_day_wise_district_details_on_given_date_interactor import \
DistrictStatisticsInteractor
from covid_dashboard.storages.district_storage_implementation import \
DistrictStorageImplementation
from covid_dashboard.storages.mandal_storage_implementation import \
MandalStorageImplementation
from covid_dashboard.presenters.presenter_implementation import \
PresenterImplementation
@validate_decorator(validator_class=ValidatorClass)
def api_wrapper(*args, **kwargs):
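    """Return the district statistics for the requested date as JSON.
    Wires the storage and presenter implementations into the interactor
    (clean-architecture style) and serializes the interactor's result.
    """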
district_id = kwargs['district_id']
request_data = kwargs['request_data']
for_date = request_data['for_date']
district_storage = DistrictStorageImplementation()
mandal_storage = MandalStorageImplementation()
presenter = PresenterImplementation()
interactor = DistrictStatisticsInteractor(
district_storage=district_storage,
mandal_storage=mandal_storage,
presenter=presenter
)
response = \
interactor.get_district_statistics_on_given_date(
district_id=district_id,
for_date=for_date
)
json_data = json.dumps(response)
return HttpResponse(json_data, status=200)
| [
"[email protected]"
] | |
6e9393d3d17cfc7054199e6f3782dfc6383250df | 11052dcb2ee70b601c521ba40685fd8e03c86a45 | /apps/oinkerprofile/models.py | 3fcf6b96b867f697ad2cba296ea083b482cb2fd7 | [] | no_license | naistangz/twitter-clone | 53757959de8f3f063cd1fbbc9bc72c024beee22f | 56f44610a729cd040857786f481c8b7f94397a3d | refs/heads/master | 2023-04-13T14:16:44.357440 | 2021-04-25T08:18:42 | 2021-04-25T08:18:42 | 354,792,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class OinkerProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
follows = models.ManyToManyField(
'self', related_name='followed_by', symmetrical=False)
avatar = models.ImageField(upload_to='uploads/', blank=True, null=True)
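# Attach a lazy accessor so that `user.oinkerprofile` always resolves to a
# profile, creating one on first access (a common monkey-patching shortcut).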
User.oinkerprofile = property(
lambda u: OinkerProfile.objects.get_or_create(user=u)[0])
| [
"[email protected]"
] | |
6690bf9a5dacfc7bafcbe1023ee73b5f95bae792 | 9912574419aa12bac4182309bd3ff2a85cc0fe43 | /myenv/lib/python3.7/site-packages/grpc/_grpcio_metadata.py | c99079b672f7923336da9c5b5a3e26a5a963ff2d | [
"MIT"
] | permissive | liv-yaa/ML_BeeHealth | ad01858a0a5651b2bda68e77ff2e60550e1f3eba | 7e0797d6faaef2b071c841e57fffe45d470aef3c | refs/heads/master | 2023-01-23T01:49:16.444685 | 2023-01-11T17:35:04 | 2023-01-11T17:35:04 | 172,980,260 | 1 | 1 | MIT | 2022-12-08T01:42:46 | 2019-02-27T19:53:28 | Python | UTF-8 | Python | false | false | 26 | py | __version__ = """1.24.3""" | [
"[email protected]"
] | |
1d0c5a63ebb091013f41650238f7244a2189d919 | a9243f735f6bb113b18aa939898a97725c358a6d | /0.15/_downloads/plot_time_frequency_erds.py | ade2c6289d640ea78caded44665a8d296b989398 | [] | permissive | massich/mne-tools.github.io | 9eaf5edccb4c35831400b03278bb8c2321774ef2 | 95650593ba0eca4ff8257ebcbdf05731038d8d4e | refs/heads/master | 2020-04-07T08:55:46.850530 | 2019-09-24T12:26:02 | 2019-09-24T12:26:02 | 158,233,630 | 0 | 0 | BSD-3-Clause | 2018-11-19T14:06:16 | 2018-11-19T14:06:16 | null | UTF-8 | Python | false | false | 5,229 | py | """
===============================
Compute and visualize ERDS maps
===============================
This example calculates and displays ERDS maps of event-related EEG data. ERDS
(sometimes also written as ERD/ERS) is short for event-related
desynchronization (ERD) and event-related synchronization (ERS) [1]_.
Conceptually, ERD corresponds to a decrease in power in a specific frequency
band relative to a baseline. Similarly, ERS corresponds to an increase in
power. An ERDS map is a time/frequency representation of ERD/ERS over a range
of frequencies [2]_. ERDS maps are also known as ERSP (event-related spectral
perturbation) [3]_.
We use a public EEG BCI data set containing two different motor imagery tasks
available at PhysioNet. The two tasks are imagined hand and feet movement. Our
goal is to generate ERDS maps for each of the two tasks.
First, we load the data and create epochs of 5 s length. The data sets contain
multiple channels, but we will only consider the three channels C3, Cz, and C4.
We compute maps containing frequencies ranging from 2 to 35 Hz. We map ERD to
red color and ERS to blue color, which is the convention in many ERDS
publications. Note that we do not perform any significance tests on the map
values, but instead we display the whole time/frequency maps.
References
----------
.. [1] G. Pfurtscheller, F. H. Lopes da Silva. Event-related EEG/MEG
synchronization and desynchronization: basic principles. Clinical
Neurophysiology 110(11), 1842-1857, 1999.
.. [2] B. Graimann, J. E. Huggins, S. P. Levine, G. Pfurtscheller.
Visualization of significant ERD/ERS patterns in multichannel EEG and
ECoG data. Clinical Neurophysiology 113(1), 43-47, 2002.
.. [3] S. Makeig. Auditory event-related dynamics of the EEG spectrum and
effects of exposure to tones. Electroencephalography and Clinical
Neurophysiology 86(4), 283-293, 1993.
"""
# Authors: Clemens Brunner <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import mne
from mne.datasets import eegbci
from mne.io import concatenate_raws, read_raw_edf
from mne.time_frequency import tfr_multitaper
def center_cmap(cmap, vmin, vmax):
"""Center given colormap (ranging from vmin to vmax) at value 0.
Note that eventually this could also be achieved by re-normalizing a given
colormap by subclassing matplotlib.colors.Normalize as described here:
https://matplotlib.org/users/colormapnorms.html#custom-normalization-two-linear-ranges
""" # noqa: E501
vzero = abs(vmin) / (vmax - vmin)
index_old = np.linspace(0, 1, cmap.N)
index_new = np.hstack([np.linspace(0, vzero, cmap.N // 2, endpoint=False),
np.linspace(vzero, 1, cmap.N // 2)])
cdict = {"red": [], "green": [], "blue": [], "alpha": []}
for old, new in zip(index_old, index_new):
r, g, b, a = cmap(old)
cdict["red"].append((new, r, r))
cdict["green"].append((new, g, g))
cdict["blue"].append((new, b, b))
cdict["alpha"].append((new, a, a))
return LinearSegmentedColormap("erds", cdict)
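# A minimal alternative sketch (assumes Matplotlib >= 3.2, newer than this
# example targets): when plotting with Matplotlib directly, a norm centered at
# zero achieves the same effect without rebuilding the colormap:
#     from matplotlib.colors import TwoSlopeNorm
#     norm = TwoSlopeNorm(vmin=vmin, vcenter=0.0, vmax=vmax)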
# load and preprocess data ####################################################
subject = 1 # use data from subject 1
runs = [6, 10, 14] # use only hand and feet motor imagery runs
fnames = eegbci.load_data(subject, runs)
raws = [read_raw_edf(f, preload=True, stim_channel='auto') for f in fnames]
raw = concatenate_raws(raws)
raw.rename_channels(lambda x: x.strip('.')) # remove dots from channel names
events = mne.find_events(raw, shortest_event=0, stim_channel='STI 014')
picks = mne.pick_channels(raw.info["ch_names"], ["C3", "Cz", "C4"])
# epoch data ##################################################################
tmin, tmax = -1, 4 # define epochs around events (in s)
event_ids = dict(hands=2, feet=3) # map event IDs to tasks
epochs = mne.Epochs(raw, events, event_ids, tmin - 0.5, tmax + 0.5,
picks=picks, baseline=None, preload=True)
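# The extra 0.5 s of padding keeps time-frequency edge artifacts outside the
# window of interest; power.crop(tmin, tmax) below trims the result back.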
# compute ERDS maps ###########################################################
freqs = np.arange(2, 36, 1)  # frequencies from 2 to 35 Hz in 1 Hz steps
n_cycles = freqs # use constant t/f resolution
vmin, vmax = -1, 1.5 # set min and max ERDS values in plot
cmap = center_cmap(plt.cm.RdBu, vmin, vmax) # zero maps to white
for event in event_ids:
power = tfr_multitaper(epochs[event], freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=False, decim=2)
power.crop(tmin, tmax)
fig, ax = plt.subplots(1, 4, figsize=(12, 4),
gridspec_kw={"width_ratios": [10, 10, 10, 1]})
for i in range(3):
power.plot([i], baseline=[-1, 0], mode="percent", vmin=vmin, vmax=vmax,
cmap=(cmap, False), axes=ax[i], colorbar=False, show=False)
ax[i].set_title(epochs.ch_names[i], fontsize=10)
ax[i].axvline(0, linewidth=1, color="black", linestyle=":") # event
if i > 0:
ax[i].set_ylabel("")
ax[i].set_yticklabels("")
fig.colorbar(ax[0].collections[0], cax=ax[-1])
fig.suptitle("ERDS ({})".format(event))
fig.show()
| [
"[email protected]"
] | |
c5dd27af9004567fed4a4c508b43d1acfce35e68 | 8fcae139173f216eba1eaa01fd055e647d13fd4e | /.history/scraper_20191220154310.py | 85e5bdeab8d217043d007104d46a712f7cdf91d4 | [] | no_license | EnriqueGalindo/backend-web-scraper | 68fdea5430a0ffb69cc7fb0e0d9bcce525147e53 | 895d032f4528d88d68719838a45dae4078ebcc82 | refs/heads/master | 2020-11-27T14:02:59.989697 | 2019-12-21T19:47:34 | 2019-12-21T19:47:34 | 229,475,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,308 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Scrape a web page for email addresses, US phone numbers, and hyperlinks.
The URL to scrape is taken from the first command-line argument, e.g.
    python scraper.py https://example.com
Email addresses and phone numbers are extracted with regular expressions,
and links are collected from the page's anchor tags.
"""
__author__ = "Enrique Galindo"
# Imports go at the top of the file, after the module docstring.
import sys
import requests
import re
import pprint
from html.parser import HTMLParser
regex_email = r'''(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])'''
regex_phone = r'''(1?\W*([2-9][0-8][0-9])\W*([2-9][0-9]{2})\W*([0-9]{4})(\se?x?t?(\d*))?)'''
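# Hypothetical examples: regex_email matches "[email protected]" and
# regex_phone matches US-style numbers such as "555-867-5309" (fictional).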
class MyHTMLParser(HTMLParser):
    """HTML parser that collects the href targets of anchor tags."""
    def __init__(self):
        super().__init__()
        self.links = []
    def handle_starttag(self, tag, attrs):
        # Record the href attribute of every <a> tag encountered.
        if tag == 'a':
            for name, value in attrs:
                if name == 'href' and value:
                    self.links.append(value)
def main(args):
"""Main function is declared as standalone, for testability"""
good_phone_list = []
url = args[0]
response = requests.get(url)
response.raise_for_status()
url_list = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', response.text)
email_list = set(re.findall(regex_email, response.text))
bad_phone_list = set(re.findall(regex_phone, response.text))
for number in bad_phone_list:
good_phone_list.append(number[1] + number[2] + number[3])
print(email_list)
pprint.pprint(good_phone_list)
    parser = MyHTMLParser()
    parser.feed(response.text)
    link_list = [link for link in parser.links
                 if link.startswith(('http://', 'https://'))]
    pprint.pprint(link_list)
if __name__ == '__main__':
main(sys.argv[1:]) | [
"[email protected]"
] |