Dataset schema (each record below lists these fields in order, pipe-separated, with the source file inlined in the `content` field):

| column | type | range / values |
|---|---|---|
| blob_id | string | fixed length 40 |
| directory_id | string | fixed length 40 |
| path | string | length 3 to 616 |
| content_id | string | fixed length 40 |
| detected_licenses | list | 0 to 112 items |
| license_type | string | 2 distinct values |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | fixed length 40 |
| revision_id | string | fixed length 40 |
| branch_name | string | 777 distinct values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 distinct values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 distinct values |
| src_encoding | string | 26 distinct values |
| language | string | 1 distinct value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 distinct values |
| content | string | length 3 to 10.2M |
| authors | list | 1 item |
| author_id | string | length 1 to 132 |
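A minimal sketch of reading records with this schema via the Hugging Face `datasets` library; the repository id `user/the-dataset` and the `train` split are placeholders (assumptions), while the field names come from the schema above.

```python
from datasets import load_dataset

# Placeholder dataset id and split (assumptions) - substitute the real ones.
ds = load_dataset("user/the-dataset", split="train", streaming=True)

for record in ds:
    # Keep permissively licensed, non-generated Python files, using the fields above.
    if record["license_type"] == "permissive" and not record["is_generated"]:
        print(record["repo_name"], record["path"], record["length_bytes"])
        break
```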
4cd457aae559324c28a76e8ff71688100483e7f2 | f7a48634de139b7f5585c2bf3d3014605130428c | /ebedke/plugins/kompot.py | c41f831fd3d7bba415606e8c90bfa14c14c43220 | [
"Apache-2.0",
"MIT"
]
| permissive | ijanos/ebedke | b72dcdef63c575eb4090661bab2e2c7a7864ab76 | 9a0f91cc6536a78d7da9aca1fab22924a56d38e2 | refs/heads/master | 2023-04-20T19:36:03.928669 | 2021-01-24T11:35:15 | 2021-01-24T11:35:15 | 99,848,492 | 35 | 11 | Apache-2.0 | 2023-03-27T22:36:27 | 2017-08-09T20:08:13 | Python | UTF-8 | Python | false | false | 1,742 | py | from datetime import datetime, timedelta
from ebedke.utils.date import days_lower, on_workdays
from ebedke.utils.text import pattern_slice
from ebedke.utils import facebook
from ebedke.pluginmanager import EbedkePlugin
FB_PAGE = "https://www.facebook.com/pg/KompotBisztro/posts/"
FB_ID = "405687736167829"
@on_workdays
def getMenu(today):
day = today.weekday()
is_this_week = lambda date: datetime.strptime(date, '%Y-%m-%dT%H:%M:%S%z').date() > today.date() - timedelta(days=7)
is_today = lambda date: datetime.strptime(date, '%Y-%m-%dT%H:%M:%S%z').date() == today.date()
ignore_hashtags = lambda post: " ".join(word.lower() for word in post.split() if word[0] != "#")
daily_menu_filter = lambda post: is_today(post['created_time']) \
and "menü" in post['message'].lower()
weekly_menu_filter = lambda post: is_this_week(post['created_time']) \
and days_lower[day] in ignore_hashtags(post['message'])
weekly_menu = facebook.get_filtered_post(FB_ID, weekly_menu_filter)
if weekly_menu:
menu = pattern_slice(weekly_menu.splitlines(), [days_lower[day]], days_lower + ["sütiket", "#", "jó étvágyat", "mai menü"])
else:
menu_post = facebook.get_filtered_post(FB_ID, daily_menu_filter).splitlines()
menu = []
for i, line in enumerate(menu_post):
if "A:" in line:
menu = list((menu_post[i - 1], menu_post[i], menu_post[i + 1]))
break
return menu
plugin = EbedkePlugin(
enabled=True,
groups=["corvin"],
name='Kompót',
id='kp',
url=FB_PAGE,
downloader=getMenu,
ttl=timedelta(hours=24),
cards=['szep'],
coord=(47.485753, 19.075932)
)
| [
"[email protected]"
]
| |
931300b3c495baff8b052bb61df42f03e9e0e772 | 1a758ef862f733d98ddd8ebc8ade5cefd95c24f2 | /customers/migrations/0013_facebookcustomer.py | 3abb34e6d2d0c3b6e1e3df3ca8e899dc7fe394e5 | []
| no_license | ajajul/ReactJS_Python | f116b35394666c5b3f2419eb5d8d7aeb077d4a24 | 08310d56fa88f326ddbfdd4b189f2a3a71f76d99 | refs/heads/master | 2020-03-19T03:16:57.510672 | 2018-06-01T10:36:36 | 2018-06-01T10:36:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('customers', '0012_auto_20160802_1628'),
]
operations = [
migrations.CreateModel(
name='FacebookCustomer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('facebook_id', models.CharField(max_length=255, verbose_name=b'Facebook ID')),
('first_name', models.CharField(max_length=255, verbose_name=b'First name')),
('last_name', models.CharField(max_length=255, verbose_name=b'Last name')),
('email', models.EmailField(unique=True, max_length=255, verbose_name=b'Email address')),
('gender', models.CharField(max_length=255, verbose_name=b'Gender')),
('customer', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
]
| [
"[email protected]"
]
| |
76ffed6ea5fce21b63dfbacd0d8a3e79515b811f | 64fa49a9a2c0b157aec3224530d7f61ddf9d24fa | /v6.0.2/log/fortios_log_threat_weight.py | 7911f24d762e0a9373349dcc086d0004d60c504e | [
"Apache-2.0"
]
| permissive | chick-tiger/ansible_fgt_modules | 55bee8a41a809b8c5756f6b3f5900721ee7c40e4 | 6106e87199881d1d4c3475a4f275c03d345ec9ad | refs/heads/master | 2020-03-31T11:05:12.659115 | 2018-10-04T10:20:33 | 2018-10-04T10:20:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,651 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; you can capture its output if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_threat_weight
short_description: Configure threat weight settings.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure the log feature and threat_weight category.
Examples include all the options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
log_threat_weight:
description:
- Configure threat weight settings.
default: null
suboptions:
application:
description:
- Application-control threat weight settings.
suboptions:
category:
description:
- Application category.
id:
description:
- Entry ID.
required: true
level:
description:
- Threat weight score for Application events.
choices:
- disable
- low
- medium
- high
- critical
blocked-connection:
description:
- Threat weight score for blocked connections.
choices:
- disable
- low
- medium
- high
- critical
failed-connection:
description:
- Threat weight score for failed connections.
choices:
- disable
- low
- medium
- high
- critical
geolocation:
description:
- Geolocation-based threat weight settings.
suboptions:
country:
description:
- Country code.
id:
description:
- Entry ID.
required: true
level:
description:
- Threat weight score for Geolocation-based events.
choices:
- disable
- low
- medium
- high
- critical
ips:
description:
- IPS threat weight settings.
suboptions:
critical-severity:
description:
- Threat weight score for IPS critical severity events.
choices:
- disable
- low
- medium
- high
- critical
high-severity:
description:
- Threat weight score for IPS high severity events.
choices:
- disable
- low
- medium
- high
- critical
info-severity:
description:
- Threat weight score for IPS info severity events.
choices:
- disable
- low
- medium
- high
- critical
low-severity:
description:
- Threat weight score for IPS low severity events.
choices:
- disable
- low
- medium
- high
- critical
medium-severity:
description:
- Threat weight score for IPS medium severity events.
choices:
- disable
- low
- medium
- high
- critical
level:
description:
- Score mapping for threat weight levels.
suboptions:
critical:
description:
- Critical level score value (1 - 100).
high:
description:
- High level score value (1 - 100).
low:
description:
- Low level score value (1 - 100).
medium:
description:
- Medium level score value (1 - 100).
malware:
description:
- Anti-virus malware threat weight settings.
suboptions:
botnet-connection:
description:
- Threat weight score for detected botnet connections.
choices:
- disable
- low
- medium
- high
- critical
command-blocked:
description:
- Threat weight score for blocked command detected.
choices:
- disable
- low
- medium
- high
- critical
mimefragmented:
description:
- Threat weight score for mimefragmented detected.
choices:
- disable
- low
- medium
- high
- critical
oversized:
description:
- Threat weight score for oversized file detected.
choices:
- disable
- low
- medium
- high
- critical
switch-proto:
description:
- Threat weight score for switch proto detected.
choices:
- disable
- low
- medium
- high
- critical
virus-blocked:
description:
- Threat weight score for virus (blocked) detected.
choices:
- disable
- low
- medium
- high
- critical
virus-file-type-executable:
description:
- Threat weight score for virus (filetype executable) detected.
choices:
- disable
- low
- medium
- high
- critical
virus-infected:
description:
- Threat weight score for virus (infected) detected.
choices:
- disable
- low
- medium
- high
- critical
virus-outbreak-prevention:
description:
- Threat weight score for virus (outbreak prevention) event.
choices:
- disable
- low
- medium
- high
- critical
virus-scan-error:
description:
- Threat weight score for virus (scan error) detected.
choices:
- disable
- low
- medium
- high
- critical
status:
description:
- Enable/disable the threat weight feature.
choices:
- enable
- disable
url-block-detected:
description:
- Threat weight score for URL blocking.
choices:
- disable
- low
- medium
- high
- critical
web:
description:
- Web filtering threat weight settings.
suboptions:
category:
description:
- Threat weight score for web category filtering matches.
id:
description:
- Entry ID.
required: true
level:
description:
- Threat weight score for web category filtering matches.
choices:
- disable
- low
- medium
- high
- critical
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure threat weight settings.
fortios_log_threat_weight:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
log_threat_weight:
application:
-
category: "4"
id: "5"
level: "disable"
blocked-connection: "disable"
failed-connection: "disable"
geolocation:
-
country: "<your_own_value>"
id: "11"
level: "disable"
ips:
critical-severity: "disable"
high-severity: "disable"
info-severity: "disable"
low-severity: "disable"
medium-severity: "disable"
level:
critical: "20"
high: "21"
low: "22"
medium: "23"
malware:
botnet-connection: "disable"
command-blocked: "disable"
mimefragmented: "disable"
oversized: "disable"
switch-proto: "disable"
virus-blocked: "disable"
virus-file-type-executable: "disable"
virus-infected: "disable"
virus-outbreak-prevention: "disable"
virus-scan-error: "disable"
status: "enable"
url-block-detected: "disable"
web:
-
category: "38"
id: "39"
level: "disable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: string
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: string
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: string
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: string
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: string
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: string
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: string
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: string
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: string
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: string
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: string
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_log_threat_weight_data(json):
option_list = ['application', 'blocked-connection', 'failed-connection',
'geolocation', 'ips', 'level',
'malware', 'status', 'url-block-detected',
'web']
dictionary = {}
for attribute in option_list:
if attribute in json:
dictionary[attribute] = json[attribute]
return dictionary
def log_threat_weight(data, fos):
vdom = data['vdom']
log_threat_weight_data = data['log_threat_weight']
filtered_data = filter_log_threat_weight_data(log_threat_weight_data)
return fos.set('log',
'threat-weight',
data=filtered_data,
vdom=vdom)
def fortios_log(data, fos):
login(data)
methodlist = ['log_threat_weight']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"log_threat_weight": {
"required": False, "type": "dict",
"options": {
"application": {"required": False, "type": "list",
"options": {
"category": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"level": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]}
}},
"blocked-connection": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"failed-connection": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"geolocation": {"required": False, "type": "list",
"options": {
"country": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"level": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]}
}},
"ips": {"required": False, "type": "dict",
"options": {
"critical-severity": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"high-severity": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"info-severity": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"low-severity": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"medium-severity": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]}
}},
"level": {"required": False, "type": "dict",
"options": {
"critical": {"required": False, "type": "int"},
"high": {"required": False, "type": "int"},
"low": {"required": False, "type": "int"},
"medium": {"required": False, "type": "int"}
}},
"malware": {"required": False, "type": "dict",
"options": {
"botnet-connection": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"command-blocked": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"mimefragmented": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"oversized": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"switch-proto": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"virus-blocked": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"virus-file-type-executable": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"virus-infected": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"virus-outbreak-prevention": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"virus-scan-error": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]}
}},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"url-block-detected": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]},
"web": {"required": False, "type": "list",
"options": {
"category": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"level": {"required": False, "type": "str",
"choices": ["disable", "low", "medium",
"high", "critical"]}
}}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_log(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
a4606cf74738347229e2423074a0eab57eddb3ef | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/6/ow1.py | ff3594bcc2829e13cbb5c24ef13111ffb291b242 | []
| no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'oW1':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
]
| |
959d08c86faa5429545a51552270f33743c50c74 | 3485140792e9bae67499fef138d50d046cccb256 | /datamining/AprioriProject/util/ProgressBar.py | 5858e003c7264ec53dab5ae6a814fc400029c84a | []
| no_license | ALREstevam/TopicosBD-DataMining-IBGE-Apriori | dc14a50ca8f3046b8125a183cdcb4e99d3c4c616 | 5bf8dee35df0f22902f7816b8738e585fdca3410 | refs/heads/master | 2020-03-17T04:38:08.111880 | 2018-06-14T12:14:11 | 2018-06-14T12:14:11 | 133,282,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,920 | py | import sys
class ProgressBar(object):
DEFAULT_BAR_LENGTH = 65
DEFAULT_CHAR_ON = '█'
DEFAULT_CHAR_OFF = '░'
def __init__(self, end, start=0):
self.end = end
self.start = start
self._barLength = self.__class__.DEFAULT_BAR_LENGTH
self.setLevel(self.start)
self._plotted = False
def setLevel(self, level):
self._level = level
if level < self.start: self._level = self.start
if level > self.end: self._level = self.end
value = float(self.end - self.start)
if(value == 0): value = 1
self._ratio = float(self._level - self.start) / value
self._levelChars = int(self._ratio * self._barLength)
def plotProgress(self):
sys.stdout.write("\r %3i%% [%s%s]" %(
int(self._ratio * 100.0),
self.__class__.DEFAULT_CHAR_ON * int(self._levelChars),
self.__class__.DEFAULT_CHAR_OFF * int(self._barLength - self._levelChars),
))
sys.stdout.flush()
self._plotted = True
def setAndPlot(self, level):
oldChars = self._levelChars
self.setLevel(level)
if (not self._plotted) or (oldChars != self._levelChars):
self.plotProgress()
def __add__(self, other):
assert type(other) in [float, int], "can only add a number"
self.setAndPlot(self._level + other)
return self
def __sub__(self, other):
return self.__add__(-other)
def __iadd__(self, other):
return self.__add__(other)
def __isub__(self, other):
return self.__add__(-other)
def __del__(self):
sys.stdout.write("\n")
'''
import time
for j in range(5):
count = 1000
pb = ProgressBar(count)
# pb.plotProgress()
for i in range(0, count):
pb += 1
# pb.setAndPlot(i + 1)
time.sleep(0.01)
print('\n\nSTEP {}'.format(j))
'''
| [
"[email protected]"
]
| |
6e210328858f6452857a8f09f3486b78b2ddc68c | 51fba32aca3114a6897e11b271ee29d3b038056c | /tests/08_test_patch.py | bc9a507c707597c9dbc29ad814635d12538e8e77 | []
| no_license | lamby/git-buildpackage | b2fbf08b93ed0520c8e5ba0c3eb66f15d7a64a41 | c4bc6561c788f71b5131d0bd8e92478e83808200 | refs/heads/master | 2021-01-02T23:04:26.941635 | 2017-08-05T23:46:58 | 2017-08-06T00:55:37 | 75,486,665 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,450 | py | # vim: set fileencoding=utf-8 :
"""Test L{Patch} class"""
from . import context # noqa: 401
import os
import unittest
from gbp.patch_series import Patch
class TestPatch(unittest.TestCase):
data_dir = os.path.splitext(__file__)[0] + '_data'
def test_filename(self):
"""Get patch information from the filename"""
p = Patch(os.path.join(self.data_dir, "doesnotexist.diff"))
self.assertEqual('doesnotexist', p.subject)
self.assertEqual({}, p.info)
p = Patch(os.path.join(self.data_dir, "doesnotexist.patch"))
self.assertEqual('doesnotexist', p.subject)
p = Patch(os.path.join(self.data_dir, "doesnotexist"))
self.assertEqual('doesnotexist', p.subject)
self.assertEqual(None, p.author)
self.assertEqual(None, p.email)
self.assertEqual(None, p.date)
def test_header(self):
"""Get the patch information from a patch header"""
patchfile = os.path.join(self.data_dir, "patch1.diff")
self.assertTrue(os.path.exists(patchfile))
p = Patch(patchfile)
self.assertEqual('This is patch1', p.subject)
self.assertEqual("foo", p.author)
self.assertEqual("[email protected]", p.email)
self.assertEqual("This is the long description.\n"
"It can span several lines.\n",
p.long_desc)
self.assertEqual('Sat, 24 Dec 2011 12:05:53 +0100', p.date)
| [
"[email protected]"
]
| |
12d7d0236d58487ba5f9d74bafeeeaeb487401aa | 3940b4a507789e1fbbaffeb200149aee215f655a | /lc/112.PathSum.py | 14e465def2db1577e4b4e9af3851db0a471c4446 | []
| no_license | akimi-yano/algorithm-practice | 15f52022ec79542d218c6f901a54396a62080445 | 1abc28919abb55b93d3879860ac9c1297d493d09 | refs/heads/master | 2023-06-11T13:17:56.971791 | 2023-06-10T05:17:56 | 2023-06-10T05:17:56 | 239,395,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py | # 112. Path Sum
# Easy
# Given the root of a binary tree and an integer targetSum, return true if the tree has a root-to-leaf path such that adding up all the values along the path equals targetSum.
# A leaf is a node with no children.
# Example 1:
# Input: root = [5,4,8,11,null,13,4,7,2,null,null,null,1], targetSum = 22
# Output: true
# Example 2:
# Input: root = [1,2,3], targetSum = 5
# Output: false
# Example 3:
# Input: root = [1,2], targetSum = 0
# Output: false
# Constraints:
# The number of nodes in the tree is in the range [0, 5000].
# -1000 <= Node.val <= 1000
# -1000 <= targetSum <= 1000
# This solution works!:
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def hasPathSum(self, root: TreeNode, targetSum: int) -> bool:
def helper(cur, total):
nonlocal targetSum
total += cur.val
if not cur.left and not cur.right:
return targetSum == total
if cur.left and helper(cur.left, total):
return True
if cur.right and helper(cur.right, total):
return True
return False
if not root:
return False
return helper(root, 0)
# This solution also works!:
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @param sum, an integer
# @return a boolean
# 1:27
def hasPathSum(self, root, sum):
if not root:
return False
if not root.left and not root.right and root.val == sum:
return True
sum -= root.val
return self.hasPathSum(root.left, sum) or self.hasPathSum(root.right, sum) | [
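# Usage sketch (hypothetical, illustration only): TreeNode is the judge-provided
# class from the commented-out definition above, so this is not runnable as-is.
# For the tree [1,2,3] from Example 2:
#   root = TreeNode(1)
#   root.left, root.right = TreeNode(2), TreeNode(3)
#   Solution().hasPathSum(root, 5)   # False: the root-to-leaf sums are 3 and 4
#   Solution().hasPathSum(root, 3)   # True: the path 1 -> 2 sums to 3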
"[email protected]"
]
| |
e87acb9a972fbc841b375cd19b7a3397f02cb1d5 | 920bc59a07adc65569ae2d6736388519b43cfa23 | /business_logic/blockly/build.py | 9f732ce12acd96a8f9d521cb445158fd5990988a | [
"MIT"
]
| permissive | glafira-ivanova/django-business-logic | e924ccabac6b5219fd87dabe60c6e0ecfaa40303 | 7cc0d0475815082e75a16201daf9865d08d3f281 | refs/heads/master | 2021-01-11T05:35:35.193191 | 2016-10-24T12:59:04 | 2016-10-24T12:59:04 | 71,771,078 | 0 | 0 | null | 2016-10-24T09:03:42 | 2016-10-24T09:03:42 | null | UTF-8 | Python | false | false | 5,305 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import inspect
from lxml import etree
from django.db.models import Model
from ..models import *
from .data import OPERATOR_TABLE
from .exceptions import BlocklyXmlBuilderException
def camel_case_to_snake_case(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
class BlocklyXmlBuilder(NodeCacheHolder):
def build(self, tree_root):
xml = etree.Element('xml')
self.visit(tree_root, parent_xml=xml)
return etree.tostring(xml, pretty_print=True).decode('utf-8')
def visit(self, node, parent_xml):
content_object = node.content_object
if content_object is None:
last_xml = None
for child in self.get_children(node):
if last_xml is not None:
next = etree.Element('next')
last_xml.append(next)
parent_xml = next
last_xml = self.visit(child, parent_xml)
return
for cls in inspect.getmro(content_object.__class__):
if cls == Model:
break
method_name = 'visit_{}'.format(camel_case_to_snake_case(cls.__name__))
method = getattr(self, method_name, None)
if not method:
continue
node_xml = method(node, parent_xml)
if not getattr(method, 'process_children', None):
for child in self.get_children(node):
self.visit(child, parent_xml)
return node_xml
def visit_constant(self, node, parent_xml):
block_type = {
NumberConstant: 'math_number',
StringConstant: 'text',
BooleanConstant: 'logic_boolean',
}
field_name = {
NumberConstant: 'NUM',
StringConstant: 'TEXT',
BooleanConstant: 'BOOL',
}
content_object = node.content_object
cls = content_object.__class__
block = etree.SubElement(parent_xml, 'block', type=block_type[cls])
field = etree.SubElement(block, 'field', name=field_name[cls])
if isinstance(content_object, BooleanConstant):
field.text = str(content_object).upper()
else:
field.text = str(content_object)
return block
def visit_variable(self, node, parent_xml):
variables_get_block = etree.SubElement(parent_xml, 'block', type='variables_get')
self._visit_variable(node, variables_get_block)
def visit_assignment(self, node, parent_xml):
lhs_node, rhs_node = self.get_children(node)
variables_set = etree.SubElement(parent_xml, 'block', type='variables_set')
self._visit_variable(lhs_node, variables_set)
value = etree.SubElement(variables_set, 'value', name='VALUE')
self.visit(rhs_node, value)
return variables_set
visit_assignment.process_children = True
def _visit_variable(self, node, parent_xml):
variable = node.content_object
field = etree.SubElement(parent_xml, 'field', name='VAR')
field.text = variable.definition.name
def visit_binary_operator(self, node, parent_xml):
# determine block_type
operator = node.content_object.operator
block_type = None
table = None
for block_type, table in OPERATOR_TABLE.items():
if operator in table:
break
else:
raise BlocklyXmlBuilderException('Invalid Operator: {}'.format(operator))
block = etree.SubElement(parent_xml, 'block', type=block_type)
field = etree.SubElement(block, 'field', name='OP')
field.text = table[operator]
lhs_node, rhs_node = self.get_children(node)
for value_name, child_node in (('A', lhs_node), ('B', rhs_node)):
value = etree.SubElement(block, 'value', name=value_name)
self.visit(child_node, value)
return block
visit_binary_operator.process_children = True
def visit_if_statement(self, node, parent_xml):
children = self.get_children(node)
block = etree.SubElement(parent_xml, 'block', type='controls_if')
if len(children) > 2:
mutation = etree.SubElement(block, 'mutation')
if len(children) % 2:
mutation.set('else', '1')
elifs = (len(children) - 2 - len(children) % 2) / 2
if elifs:
mutation.set('elseif', str(int(elifs)))
for i, pair in enumerate(pairs(children)):
# last "else" branch
if len(pair) == 1:
statement = etree.SubElement(block, 'statement', name='ELSE')
self.visit(pair[0], statement)
break
if_condition = pair[0]
if_value = etree.SubElement(block, 'value', name='IF{}'.format(i))
self.visit(if_condition, if_value)
statement = etree.SubElement(block, 'statement', name='DO{}'.format(i))
self.visit(pair[1], statement)
visit_if_statement.process_children = True
def tree_to_blockly_xml(tree_root):
return BlocklyXmlBuilder().build(tree_root)
def blockly_xml_to_tree(xml):
pass
| [
"[email protected]"
]
| |
1549dbbffe60bde02cbe4a4dc8ded721bb9ac421 | f56346f16477de58c5483ddbab63d3bff15801c6 | /python_source/graph-tool/example2.py | a7571e1e5d9faf83e8928fe282b25427d72ff25a | []
| no_license | jerryhan88/py_source | ca6afb6582777a444a19e33c832b638fc9e2fd52 | e1500b1d2d4fa5f30e278422c5b1afa1d777f57f | refs/heads/master | 2020-04-06T13:12:34.814275 | 2016-10-06T09:30:50 | 2016-10-06T09:30:50 | 40,874,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | #! /usr/bin/env python
# We will need some things from several places
from __future__ import division, absolute_import, print_function
import sys
if sys.version_info < (3,):
range = xrange
import os
from pylab import * # for plotting
from numpy.random import * # for random sampling
seed(42)
# We need to import the graph_tool module itself
from graph_tool.all import *
def price_network():
# let's construct a Price network (the one that existed before Barabasi). It is
# a directed network, with preferential attachment. The algorithm below is
# very naive, and a bit slow, but quite simple.
# We start with an empty, directed graph
g = Graph()
# We want also to keep the age information for each vertex and edge. For that
# let's create some property maps
v_age = g.new_vertex_property("int")
e_age = g.new_edge_property("int")
# The final size of the network
N = 100
# We have to start with one vertex
v = g.add_vertex()
v_age[v] = 0
# we will keep a list of the vertices. The number of times a vertex is in this
# list will give the probability of it being selected.
vlist = [v]
# let's now add the new edges and vertices
for i in range(1, N):
# create our new vertex
v = g.add_vertex()
v_age[v] = i
# we need to sample a new vertex to be the target, based on its in-degree +
# 1. For that, we simply randomly sample it from vlist.
i = randint(0, len(vlist))
target = vlist[i]
# add edge
e = g.add_edge(v, target)
e_age[e] = i
# put v and target in the list
vlist.append(target)
vlist.append(v)
# now we have a graph!
# let's do a random walk on the graph and print the age of the vertices we find,
# just for fun.
v = g.vertex(randint(0, g.num_vertices()))
while True:
print("vertex:", int(v), "in-degree:", v.in_degree(), "out-degree:",
v.out_degree(), "age:", v_age[v])
if v.out_degree() == 0:
print("Nowhere else to go... We found the main hub!")
break
n_list = []
for w in v.out_neighbours():
n_list.append(w)
v = n_list[randint(0, len(n_list))]
# let's save our graph for posterity. We want to save the age properties as
# well... To do this, they must become "internal" properties:
g.vertex_properties["age"] = v_age
g.edge_properties["age"] = e_age
# now we can save it
g.save("price.xml.gz")
# Let's plot its in-degree distribution
in_hist = vertex_hist(g, "in")
y = in_hist[0]
err = sqrt(in_hist[0])
err[err >= y] = y[err >= y] - 1e-2
figure(figsize=(6,4))
errorbar(in_hist[1][:-1], in_hist[0], fmt="o", yerr=err,
label="in")
gca().set_yscale("log")
gca().set_xscale("log")
gca().set_ylim(1e-1, 1e5)
gca().set_xlim(0.8, 1e3)
subplots_adjust(left=0.2, bottom=0.2)
xlabel("$k_{in}$")
ylabel("$NP(k_{in})$")
tight_layout()
savefig("price-deg-dist.pdf")
savefig("price-deg-dist.png")
price_network()
g = load_graph("price.xml.gz")
age = g.vertex_properties["age"]
pos = sfdp_layout(g)
graph_draw(g, pos, output_size=(1000, 1000), vertex_color=[1,1,1,0],
vertex_fill_color=age, vertex_size=1, edge_pen_width=1.2,
vcmap=matplotlib.cm.gist_heat_r, output="price.png")
| [
"[email protected]"
]
| |
6e248c7365a903010c866c0f556d026a124c56af | 9c636aeed2fc0a591507fcf0a8a6124fae710c9b | /insertLL.py | 3d8c20f19faad87ef2b54e0fea3e0ad01926eb92 | []
| no_license | ilkaynazli/challenges | 4b2d1ac847b1761f98183457f8ea5bac6556eeff | f7c165fedbdc9811fb7f1d2a43c797f5b5ac5322 | refs/heads/master | 2020-04-07T01:03:18.625568 | 2019-04-25T19:40:22 | 2019-04-25T19:40:22 | 157,928,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | """
Given a node from a cyclic linked list which is sorted in ascending order, write a function to insert a value into the list such that it remains a cyclic sorted list. The given node can be a reference to any single node in the list, and may not be necessarily the smallest value in the cyclic list.
If there are multiple suitable places for insertion, you may choose any place to insert the new value. After the insertion, the cyclic list should remain sorted.
If the list is empty (i.e., given node is null), you should create a new single cyclic list and return the reference to that single node. Otherwise, you should return the original given node.
"""
"""
# Definition for a Node.
class Node:
def __init__(self, val, next):
self.val = val
self.next = next
"""
class Solution:
def insert(self, head: 'Node', insertVal: 'int') -> 'Node':
new = Node(insertVal, None)
def insert_node(cur, new):
new.next = cur.next
cur.next = new
if head is None:
head = new
return head
current = head
while current:
if (current.val < new.val and current.next.val >= new.val):
insert_node(current, new)
break
if current.next.val < current.val:
if current.next.val >= new.val or current.val < new.val:
insert_node(current, new)
break
current = current.next
if current.next == head:
insert_node(current, new)
break
return head | [
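# Usage sketch (hypothetical, illustration only): Node is the judge-provided class
# from the commented-out definition above, so this is not runnable as-is.
# Inserting 2 into the sorted cyclic list 3 -> 4 -> 1 -> (back to 3):
#   head = Node(3, None); n4 = Node(4, None); n1 = Node(1, None)
#   head.next, n4.next, n1.next = n4, n1, head
#   Solution().insert(head, 2)   # 2 ends up between 1 and 3; the cycle stays sorted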
"[email protected]"
]
| |
040da08f98ffd9b102de0d8c3fb12f826ce7f563 | 523e24bd96d7de004a13e34a58f5c2d79c8222e0 | /plugin.program.indigo/maintool.py | 7517a338760875f088560711b832ad5ef1cff331 | []
| no_license | Bonitillo/Bonitillonew | ec281e5ab9d4fec83d88936e8d8ce32bad6a81c9 | a8099e326dda297f66096480ec93def8a8c124a8 | refs/heads/master | 2022-10-13T05:39:01.126653 | 2017-03-21T16:47:23 | 2017-03-21T16:47:23 | 85,725,652 | 2 | 4 | null | 2022-09-30T21:18:58 | 2017-03-21T16:16:33 | Python | UTF-8 | Python | false | false | 7,349 | py | from urllib2 import Request, urlopen
import urllib2,urllib,re,os, shutil
import sys
import time,datetime
import xbmcplugin,xbmcgui,xbmc, xbmcaddon, downloader, extract, time
from libs import kodi
from libs import viewsetter
addon_id=kodi.addon_id
addon = (addon_id, sys.argv)
artwork = xbmc.translatePath(os.path.join('special://home','addons',addon_id,'art/'))
fanart = artwork+'fanart.jpg'
messages = xbmc.translatePath(os.path.join('special://home','addons',addon_id,'resources','messages/'))
execute = xbmc.executebuiltin
AddonTitle = 'Indigo'
########PATHS###############################################
addonPath=xbmcaddon.Addon(id=addon_id).getAddonInfo('path')
addonPath=xbmc.translatePath(addonPath)
xbmcPath=os.path.join(addonPath,"..","..")
KodiPath=os.path.abspath(xbmcPath)
############################################################
def tool_menu():
kodi.addItem("Clear Cache",'','clearcache',artwork+'clear_cache.png',description="Clear your device cache!")
kodi.addItem("Purge Packages",'','purgepackages',artwork+'purge_packages.png',description="Erase old addon update files!")
kodi.addItem("Wipe Addons",'','wipeaddons',artwork+'wipe_addons.png',description="Erase all your Kodi addons in one shot!")
kodi.addDir("Install Custom Keymaps",'','customkeys',artwork+'custom_keymaps.png',description="Get the best experience out of your device-specific remote control!")
if kodi.get_setting ('automain') == 'true':
kodi.addItem("Disable Auto Maintenance ",'','disablemain',artwork+'disable_AM.png',description="Disable the periodic automated erasing of cache and packages!")
if kodi.get_setting ('automain') == 'false':
kodi.addItem("Enable Auto Maintenance ",'','enablemain',artwork+'enable_AM.png',description="Enable the periodic automated erasing of cache and packages!")
if kodi.get_setting ('scriptblock') == 'true':
kodi.addItem("Disable Malicious Scripts Blocker",'','disableblocker',artwork+'disable_MSB.png',description="Disable protection against malicious scripts!")
if kodi.get_setting ('scriptblock') == 'false':
kodi.addItem("Enable Malicious Scripts Blocker",'','enableblocker',artwork+'enable_MSB.png',description="Enable protection against malicious scripts!")
viewsetter.set_view("sets")
################################
### Clear Cache ###
################################
def clear_cache():
kodi.log('CLEAR CACHE ACTIVATED')
xbmc_cache_path = os.path.join(xbmc.translatePath('special://home'), 'cache')
confirm=xbmcgui.Dialog().yesno("Please Confirm"," Please confirm that you wish to clear "," your Kodi application cache!"," ","Cancel","Clear")
if confirm:
if os.path.exists(xbmc_cache_path)==True:
for root, dirs, files in os.walk(xbmc_cache_path):
file_count = 0
file_count += len(files)
if file_count > 0:
for f in files:
try:
os.unlink(os.path.join(root, f))
except:
pass
for d in dirs:
try:
shutil.rmtree(os.path.join(root, d))
except:
pass
dialog = xbmcgui.Dialog()
dialog.ok(AddonTitle, " Cache Cleared Successfully!")
xbmc.executebuiltin("Container.Refresh()")
################################
### End Clear Cache ###
################################
def purge_packages():
kodi.log('PURGE PACKAGES ACTIVATED')
packages_path = xbmc.translatePath(os.path.join('special://home/addons/packages', ''))
confirm=xbmcgui.Dialog().yesno("Please Confirm"," Please confirm that you wish to delete "," your old addon installation packages!"," ","Cancel","Delete")
if confirm:
try:
for root, dirs, files in os.walk(packages_path,topdown=False):
for name in files :
os.remove(os.path.join(root,name))
dialog = xbmcgui.Dialog()
dialog.ok(AddonTitle, " Packages Folder Wiped Successfully!")
xbmc.executebuiltin("Container.Refresh()")
except:
dialog = xbmcgui.Dialog()
dialog.ok(AddonTitle, "Error Deleting Packages please visit TVADDONS.AG forums")
def wipe_addons():
kodi.logInfo('WIPE ADDONS ACTIVATED')
confirm=xbmcgui.Dialog().yesno("Please Confirm"," Please confirm that you wish to uninstall "," all addons from your device!"," ","Cancel","Uninstall")
if confirm:
addonPath=xbmcaddon.Addon(id=addon_id).getAddonInfo('path')
addonPath=xbmc.translatePath(addonPath)
xbmcPath=os.path.join(addonPath,"..","..")
xbmcPath=os.path.abspath(xbmcPath);
addonpath = xbmcPath+'/addons/'
mediapath = xbmcPath+'/media/'
systempath = xbmcPath+'/system/'
userdatapath = xbmcPath+'/userdata/'
packagepath = xbmcPath+ '/addons/packages/'
try:
for root, dirs, files in os.walk(addonpath,topdown=False):
print root
if root != addonpath :
if 'plugin.program.indigo' not in root:
if 'metadata.album.universal' not in root:
if 'metadata.artists.universal' not in root:
if 'metadata.common.musicbrainz.org' not in root:
if 'service.xbmc.versioncheck' not in root:
shutil.rmtree(root)
dialog = xbmcgui.Dialog()
dialog.ok(AddonTitle, "Addons Wiped Successfully! Click OK to exit Kodi and then restart to complete .")
xbmc.executebuiltin('ShutDown')
except:
dialog = xbmcgui.Dialog()
dialog.ok(AddonTitle, "Error Wiping Addons please visit TVADDONS.AG forums")
def disable_main():
#kodi.log('DISABLE MAIN TOOL')
confirm=xbmcgui.Dialog();
if confirm.yesno('Automatic Maintenance ',"Please confirm that you wish to TURN OFF automatic maintenance! "," "):
kodi.log ("Disabled AUTOMAIN")
kodi.set_setting('automain','false')
dialog = xbmcgui.Dialog()
dialog.ok("Automatic Maintenance", "Settings Changed! Click OK to exit Kodi and then restart to complete .")
xbmc.executebuiltin('ShutDown')
else:
return
def enable_main():
#kodi.log('ENABLE MAIN TOOL')
confirm=xbmcgui.Dialog();
if confirm.yesno('Automatic Maintenance ',"Please confirm that you wish to TURN ON automatic maintenance! "," "):
kodi.log ("enabled AUTOMAIN")
kodi.set_setting('automain','true')
dialog = xbmcgui.Dialog()
dialog.ok("Automatic Maintenance", "Settings Changed! Click OK to exit Kodi and then restart to complete .")
xbmc.executebuiltin('ShutDown')
else:
return
def disable_blocker():
#kodi.log('DISABLE BLOCKER')
confirm=xbmcgui.Dialog();
if confirm.yesno('Malicious Script Blocker',"Please confirm that you wish to TURN OFF Malicious Script Blocker! "," "):
kodi.log ("Disable Script Block")
kodi.set_setting('scriptblock','false')
dialog = xbmcgui.Dialog()
dialog.ok("Script Blocker", "Settings Changed! Click OK to exit Kodi and then restart to complete .")
xbmc.executebuiltin('ShutDown')
else:
return
def enable_blocker():
#kodi.log('ENABLE BLOCKER')
confirm=xbmcgui.Dialog();
if confirm.yesno('Malicious Script Blocker',"Please confirm that you wish to TURN ON Malicious Script Blocker! "," "):
kodi.log ("Enable Script Block")
kodi.set_setting('scriptblock','true')
dialog = xbmcgui.Dialog()
dialog.ok("Script Blocker", "Settings Changed! Click OK to exit Kodi and then restart to complete .")
xbmc.executebuiltin('ShutDown')
else:
return | [
"[email protected]"
]
| |
3eb53c7799362cdc6c41647804734c03d62b2e4e | a3e52fbdfc81da3d17fee3d11b4451b330bfd592 | /JudgeOnline/solution/hrank/algorithm/graph/shrotestReach.py | 69b1ab517d2a6ef79f88316198cd25092699b26d | []
| no_license | chrislucas/python | 79633915dd0aa8724ae3dfc5a3a32053f7a4f1e0 | d3cca374f87e134a7ddfc327a6daea983875ecac | refs/heads/master | 2021-01-17T04:08:25.056580 | 2016-12-26T11:41:31 | 2016-12-26T11:41:31 | 42,319,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | '''
Created on 11 Dec 2016
@author: C.Lucas
https://www.hackerrank.com/challenges/bfsshortreach
'''
if __name__ == '__main__':
pass | [
"[email protected]"
]
| |
cd9c5f0b38a417ed37d7647c132d0f6a38efce1e | 066e874cc6d72d82e098d81a220cbbb1d66948f7 | /.hubspot/lib/python2.7/site-packages/rbtools/clients/__init__.py | dfb90185c05b6a487e37edf0bffcced75b55a8a2 | []
| no_license | webdeveloper001/flask-inboundlead | 776792485a998a0eaa4b14016c3a2066e75ff2a2 | d0a539d86342e9efc54d0c0a1adc02c609f0f762 | refs/heads/master | 2021-01-19T01:34:55.241144 | 2017-04-05T00:42:03 | 2017-04-05T00:42:03 | 87,248,885 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,675 | py | from __future__ import print_function, unicode_literals
import logging
import pkg_resources
import re
import six
import sys
from rbtools.utils.process import die, execute
# The clients are lazy loaded via load_scmclients()
SCMCLIENTS = None
class PatchResult(object):
"""The result of a patch operation.
This stores state on whether the patch could be applied (fully or
partially), whether there are conflicts that can be resolved (as in
conflict markers, not reject files), which files conflicted, and the
patch output.
"""
def __init__(self, applied, has_conflicts=False,
conflicting_files=[], patch_output=None):
self.applied = applied
self.has_conflicts = has_conflicts
self.conflicting_files = conflicting_files
self.patch_output = patch_output
class SCMClient(object):
"""A base representation of an SCM tool.
These are used for fetching repository information and generating diffs.
"""
name = None
supports_diff_extra_args = False
supports_diff_exclude_patterns = False
supports_patch_revert = False
can_amend_commit = False
can_merge = False
can_push_upstream = False
can_delete_branch = False
def __init__(self, config=None, options=None):
self.config = config or {}
self.options = options
self.capabilities = None
def get_repository_info(self):
return None
def check_options(self):
pass
def get_changenum(self, revisions):
"""Return the change number for the given revisions.
This is only used when the client is supposed to send a change number
to the server (such as with Perforce).
Args:
revisions (dict):
A revisions dictionary as returned by ``parse_revision_spec``.
Returns:
unicode:
The change number to send to the Review Board server.
"""
return None
def scan_for_server(self, repository_info):
"""Find the server path.
This will search for the server name in the .reviewboardrc config
files. These are loaded with the current directory first, and searching
through each parent directory, and finally $HOME/.reviewboardrc last.
"""
return self._get_server_from_config(self.config, repository_info)
def parse_revision_spec(self, revisions=[]):
"""Parses the given revision spec.
The 'revisions' argument is a list of revisions as specified by the
user. Items in the list do not necessarily represent a single revision,
since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2".
SCMTool-specific overrides of this method are expected to deal with
such syntaxes.
This will return a dictionary with the following keys:
'base': A revision to use as the base of the resulting diff.
'tip': A revision to use as the tip of the resulting diff.
'parent_base': (optional) The revision to use as the base of a
parent diff.
These will be used to generate the diffs to upload to Review Board (or
print). The diff for review will include the changes in (base, tip],
and the parent diff (if necessary) will include (parent, base].
If a single revision is passed in, this will return the parent of that
revision for 'base' and the passed-in revision for 'tip'.
If zero revisions are passed in, this will return revisions relevant
for the "current change". The exact definition of what "current" means
is specific to each SCMTool backend, and documented in the
implementation classes.
"""
return {
'base': None,
'tip': None,
}
def diff(self, revisions, include_files=[], exclude_patterns=[],
extra_args=[]):
"""
Returns the generated diff and optional parent diff for this
repository.
The return value must be a dictionary, and must have, at a minimum,
a 'diff' field. A 'parent_diff' can also be provided.
It may also return 'base_commit_id', representing the revision/ID of
the commit that the diff or parent diff is based on. This exists
because in some diff formats, this may different from what's provided
in the diff.
"""
return {
'diff': None,
'parent_diff': None,
'base_commit_id': None,
}
def _get_server_from_config(self, config, repository_info):
if 'REVIEWBOARD_URL' in config:
return config['REVIEWBOARD_URL']
elif 'TREES' in config:
trees = config['TREES']
if not isinstance(trees, dict):
die('Warning: "TREES" in config file is not a dict!')
# If repository_info is a list, check if any one entry is in trees.
path = None
if isinstance(repository_info.path, list):
for path in repository_info.path:
if path in trees:
break
else:
path = None
elif repository_info.path in trees:
path = repository_info.path
if path and 'REVIEWBOARD_URL' in trees[path]:
return trees[path]['REVIEWBOARD_URL']
return None
def _get_p_number(self, base_path, base_dir):
"""Return the appropriate value for the -p argument to patch.
This function returns an integer. If the integer is -1, then the -p
option should not be provided to patch. Otherwise, the return value is
the argument to patch -p.
"""
if base_path and base_dir.startswith(base_path):
return base_path.count('/') + 1
else:
return -1
def _strip_p_num_slashes(self, files, p_num):
"""Strips the smallest prefix containing p_num slashes from file names.
To match the behavior of the patch -pX option, adjacent slashes are
counted as a single slash.
"""
if p_num > 0:
regex = re.compile(r'[^/]*/+')
return [regex.sub('', f, p_num) for f in files]
else:
return files
def _execute(self, cmd, *args, **kwargs):
"""
Prints the results of the executed command and returns
the data result from execute.
"""
return execute(cmd, ignore_errors=True, *args, **kwargs)
def has_pending_changes(self):
"""Checks if there are changes waiting to be committed.
Derived classes should override this method if they wish to support
checking for pending changes.
"""
raise NotImplementedError
def apply_patch(self, patch_file, base_path, base_dir, p=None,
revert=False):
"""Apply the patch and return a PatchResult indicating its success."""
# Figure out the -p argument for patch. We override the calculated
# value if it is supplied via a commandline option.
p_num = p or self._get_p_number(base_path, base_dir)
cmd = ['patch']
if revert:
cmd.append('-R')
if p_num >= 0:
cmd.append('-p%d' % p_num)
cmd.extend(['-i', six.text_type(patch_file)])
# Ignore return code 2 in case the patch file consists of only empty
# files, which 'patch' can't handle. Other 'patch' errors also give
# return code 2, so we must check the command output.
rc, patch_output = execute(cmd, extra_ignore_errors=(2,),
return_error_code=True)
only_garbage_in_patch = ('patch: **** Only garbage was found in the '
'patch input.\n')
if (patch_output and patch_output.startswith('patch: **** ') and
patch_output != only_garbage_in_patch):
die('Failed to execute command: %s\n%s' % (cmd, patch_output))
# Check the patch for any added/deleted empty files to handle.
if self.supports_empty_files():
try:
with open(patch_file, 'rb') as f:
patch = f.read()
except IOError as e:
logging.error('Unable to read file %s: %s', patch_file, e)
return
patched_empty_files = self.apply_patch_for_empty_files(
patch, p_num, revert=revert)
# If there are no empty files in a "garbage-only" patch, the patch
# is probably malformed.
if (patch_output == only_garbage_in_patch and
not patched_empty_files):
die('Failed to execute command: %s\n%s' % (cmd, patch_output))
# TODO: Should this take into account apply_patch_for_empty_files ?
# The return value of that function is False both when it fails
# and when there are no empty files.
return PatchResult(applied=(rc == 0), patch_output=patch_output)
def create_commit(self, message, author, run_editor,
files=[], all_files=False):
"""Creates a commit based on the provided message and author.
Derived classes should override this method if they wish to support
committing changes to their repositories.
"""
raise NotImplementedError
def get_commit_message(self, revisions):
"""Returns the commit message from the commits in the given revisions.
This pulls out the first line from the commit messages of the
given revisions. That is then used as the summary.
"""
commit_message = self.get_raw_commit_message(revisions)
lines = commit_message.splitlines()
if not lines:
return None
result = {
'summary': lines[0],
}
# Try to pull the body of the commit out of the full commit
# description, so that we can skip the summary.
if len(lines) >= 3 and lines[0] and not lines[1]:
result['description'] = '\n'.join(lines[2:]).strip()
else:
result['description'] = commit_message
return result
def delete_branch(self, branch_name, merged_only=True):
"""Deletes the specified branch.
If merged_only is False, then the branch will be deleted even if not
yet merged into an upstream branch.
"""
raise NotImplementedError
def merge(self, target, destination, message, author, squash=False,
run_editor=False):
"""Merges the target branch with destination branch."""
raise NotImplementedError
def push_upstream(self, remote_branch):
"""Pushes the current branch to upstream."""
raise NotImplementedError
def get_raw_commit_message(self, revisions):
"""Extracts the commit messages on the commits in the given revisions.
Derived classes should override this method in order to allow callers
to fetch commit messages. This is needed for description guessing.
If a derived class is unable to fetch the description, ``None`` should
be returned.
Callers that need to differentiate the summary from the description
should instead use get_commit_message().
"""
raise NotImplementedError
def get_current_branch(self):
"""Returns the repository branch name of the current directory.
Derived classes should override this method if they are able to
determine the current branch of the working directory.
If a derived class is unable to determine the branch,
``None`` should be returned.
"""
raise NotImplementedError
def supports_empty_files(self):
"""Check if the RB server supports added/deleted empty files.
This method returns False. To change this behaviour, override it in a
subclass.
"""
return False
def apply_patch_for_empty_files(self, patch, p_num, revert=False):
"""Return True if any empty files in the patch are applied.
If there are no empty files in the patch or if an error occurs while
applying the patch, we return False.
"""
raise NotImplementedError
def amend_commit_description(self, message, revisions=None):
"""Update a commit message to the given string.
The optional revisions argument exists to provide compatibility with
SCMs that allow modification of multiple changesets at any given time.
It takes a parsed revision spec, and will amend the change referenced
by the tip revision therein.
"""
raise NotImplementedError
class RepositoryInfo(object):
"""
A representation of a source code repository.
"""
def __init__(self, path=None, base_path=None, supports_changesets=False,
supports_parent_diffs=False):
self.path = path
self.base_path = base_path
self.supports_changesets = supports_changesets
self.supports_parent_diffs = supports_parent_diffs
logging.debug('repository info: %s' % self)
def __str__(self):
return 'Path: %s, Base path: %s, Supports changesets: %s' % \
(self.path, self.base_path, self.supports_changesets)
def set_base_path(self, base_path):
if not base_path.startswith('/'):
base_path = '/' + base_path
logging.debug('changing repository info base_path from %s to %s',
(self.base_path, base_path))
self.base_path = base_path
def find_server_repository_info(self, server):
"""
Try to find the repository from the list of repositories on the server.
For Subversion, this could be a repository with a different URL. For
all other clients, this is a noop.
"""
return self
def load_scmclients(config, options):
global SCMCLIENTS
SCMCLIENTS = {}
for ep in pkg_resources.iter_entry_points(group='rbtools_scm_clients'):
try:
SCMCLIENTS[ep.name] = ep.load()(config=config, options=options)
except Exception:
logging.exception('Could not load SCM Client "%s"', ep.name)
def scan_usable_client(config, options, client_name=None):
from rbtools.clients.perforce import PerforceClient
repository_info = None
tool = None
# TODO: We should only load all of the scm clients if the
# client_name isn't provided.
if SCMCLIENTS is None:
load_scmclients(config, options)
if client_name:
if client_name not in SCMCLIENTS:
logging.error('The provided repository type "%s" is invalid.' %
client_name)
sys.exit(1)
else:
scmclients = {
client_name: SCMCLIENTS[client_name]
}
else:
scmclients = SCMCLIENTS
for name, tool in six.iteritems(scmclients):
logging.debug('Checking for a %s repository...' % tool.name)
repository_info = tool.get_repository_info()
if repository_info:
break
if not repository_info:
if client_name:
logging.error('The provided repository type was not detected '
'in the current directory.')
elif getattr(options, 'repository_url', None):
logging.error('No supported repository could be accessed at '
'the supplied url.')
else:
logging.error('The current directory does not contain a checkout '
'from a supported source code repository.')
sys.exit(1)
# Verify that options specific to an SCM Client have not been mis-used.
if (getattr(options, 'change_only', False) and
not repository_info.supports_changesets):
sys.stderr.write('The --change-only option is not valid for the '
'current SCM client.\n')
sys.exit(1)
if (getattr(options, 'parent_branch', None) and
not repository_info.supports_parent_diffs):
sys.stderr.write('The --parent option is not valid for the '
'current SCM client.\n')
sys.exit(1)
if (not isinstance(tool, PerforceClient) and
(getattr(options, 'p4_client', None) or
getattr(options, 'p4_port', None))):
sys.stderr.write('The --p4-client and --p4-port options are not valid '
'for the current SCM client.\n')
sys.exit(1)
return (repository_info, tool)
def print_clients(config, options):
"""Print the supported detected SCM clients.
Each SCM client, including those provided by third party packages,
will be printed. Additionally, SCM clients which are detected in
the current directory will be highlighted.
"""
print('The following repository types are supported by this installation')
print('of RBTools. Each "<type>" may be used as a value for the')
print('"--repository-type=<type>" command line argument. Repository types')
print('which are detected in the current directory are marked with a "*"')
print('[*] "<type>": <Name>')
if SCMCLIENTS is None:
load_scmclients(config, options)
for name, tool in six.iteritems(SCMCLIENTS):
repository_info = tool.get_repository_info()
if repository_info:
print(' * "%s": %s' % (name, tool.name))
else:
print(' "%s": %s' % (name, tool.name))
| [
"[email protected]"
]
| |
a6a0280c8e64c9065944e620048f6383364b2778 | 5c205eab11d14b63e9fa7267d353448bc3761757 | /dnanexus/tf_workflow.py | ebec312ef82cccbec7f9d076eeb5a0068a9b8fef | []
| no_license | huboqiang/tf_chipseq | 909e07fa57698b398c957e47acae481691b04a57 | 6564b2e8f332b23d849c408e205781782f561ede | refs/heads/master | 2020-12-24T02:22:44.284115 | 2015-03-13T22:12:21 | 2015-03-13T22:12:21 | 32,879,755 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,183 | py | #!/usr/bin/env python
'''Instantiate the ENCODE ChIP-seq workflow'''
import pdb
import os.path, sys, subprocess, logging, re
import dxpy
EPILOG = '''Notes:
Examples:
Build blank workflow from fastq to peaks (no IDR)
%(prog)s
Build a blank workflow that includes both naive peak calling and IDR.
%(prog)s --idr
Build and run a workflow, specifying fastq's for two replicates and matched controls, including naive peaks and IDR.
%(prog)s --rep1 r1.fastq.gz --rep2 r2.fastq.gz --ctl1 c1.fastq.gz --ctl2 c2.fastq.gz --idr --yes
Build and run a workflow, specifying fastq's for two replicates and matched controls, reporting only IDR-processed peaks.
%(prog)s --rep1 r1.fastq.gz --rep2 r2.fastq.gz --ctl1 c1.fastq.gz --ctl2 c2.fastq.gz --idronly --yes
Build and run a workflow, skipping mapping and starting from tagAligns from paired-end data, reporting both naive and IDR-processed peaks.
%(prog)s --rep1 f1.tagAlign.gz --rep2 r2.tagAlign.gz --ctl1 c1.tagAlign.gz --ctl2 c2.tagAlign.gz --rep1pe --rep2pe --idr --yes
'''
WF_TITLE = 'tf_chip_seq'
WF_DESCRIPTION = 'ENCODE TF ChIP-Seq Pipeline'
MAPPING_APPLET_NAME = 'encode_bwa'
FILTER_QC_APPLET_NAME = 'filter_qc'
XCOR_APPLET_NAME = 'xcor'
XCOR_ONLY_APPLET_NAME = 'xcor_only'
SPP_APPLET_NAME = 'spp'
POOL_APPLET_NAME = 'pool'
PSEUDOREPLICATOR_APPLET_NAME = 'pseudoreplicator'
ENCODE_SPP_APPLET_NAME = 'encode_spp'
IDR_APPLET_NAME='idr'
ENCODE_IDR_APPLET_NAME='encode_idr'
APPLETS = {}
def get_args():
import argparse
parser = argparse.ArgumentParser(
description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--debug', help="Print debug messages", default=False, action='store_true')
parser.add_argument('--reference', help="Reference tar to map to", default='ENCODE Reference Files:/hg19/hg19_XY.tar.gz')
parser.add_argument('--rep1', help="Replicate 1 fastq or tagAlign", default=None, nargs='*')
parser.add_argument('--rep2', help="Replicate 2 fastq or tagAlign", default=None, nargs='*')
parser.add_argument('--ctl1', help="Control for replicate 1 fastq or tagAlign", default=None, nargs='*')
parser.add_argument('--ctl2', help="Control for replicate 2 fastq or tagAlign", default=None, nargs='*')
parser.add_argument('--outp', help="Output project name or ID", default=dxpy.WORKSPACE_ID)
parser.add_argument('--outf', help="Output folder name or ID", default="/analysis_run")
parser.add_argument('--name', help="Name of new workflow", default="TF ChIP-Seq")
parser.add_argument('--applets', help="Name of project containing applets", default="E3 ChIP-seq")
parser.add_argument('--nomap', help='Given tagAligns, skip to peak calling', default=False, action='store_true')
parser.add_argument('--rep1pe', help='Specify rep1 is paired end (only if --nomap)', default=False, action='store_true')
parser.add_argument('--rep2pe', help='Specify rep2 is paired end (only if --nomap)', default=False, action='store_true')
parser.add_argument('--blacklist', help="Blacklist to filter IDR peaks", default='ENCODE Reference Files:/hg19/blacklists/wgEncodeDacMapabilityConsensusExcludable.bed.gz')
parser.add_argument('--idr', help='Report peaks with and without IDR analysis', default=False, action='store_true')
parser.add_argument('--idronly', help='Only report IDR peaks', default=None, action='store_true')
parser.add_argument('--yes', help='Run the workflow', default=False, action='store_true')
args = parser.parse_args()
global DEBUG
DEBUG = args.debug
if DEBUG:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
else: #use the defaulf logging level
logging.basicConfig(format='%(levelname)s:%(message)s')
logging.debug("rep1 is: %s" %(args.rep1))
return args
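# The next three helpers look like unused placeholders; main() below builds the
# mapping, peak-calling and IDR stages directly instead of calling them.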
def blank_workflow(args):
return
def map_and_filter(infile, args):
if not infile:
return {None}
stages = {None}
return stages
def call_peaks(expvsctl, args):
if not expvsctl:
return {None}
stages = {None}
return stages
def resolve_project(identifier, privs='r'):
project = dxpy.find_one_project(name=identifier, level='VIEW', name_mode='exact', return_handler=True, zero_ok=True)
if project == None:
try:
project = dxpy.get_handler(identifier)
except:
logging.error('Could not find a unique project with name or id %s' %(identifier))
raise ValueError(identifier)
logging.debug('Project %s access level is %s' %(project.name, project.describe()['level']))
if privs == 'w' and project.describe()['level'] == 'VIEW':
logging.error('Output project %s is read-only' %(identifier))
raise ValueError(identifier)
return project
def resolve_folder(project, identifier):
if not identifier.startswith('/'):
identifier = '/' + identifier
try:
project_id = project.list_folder(identifier)
except:
try:
project_id = project.new_folder(identifier, parents=True)
except:
logging.error("Cannot create folder %s in project %s" %(identifier, project.name))
raise ValueError('%s:%s' %(project.name, identifier))
else:
logging.info("New folder %s created in project %s" %(identifier, project.name))
return identifier
def resolve_file(identifier):
logging.debug("resolve_file: %s" %(identifier))
if not identifier:
return None
m = re.match(r'''^([\w\-\ \.]+):([\w\-\ /\.]+)''', identifier)
if m:
project_identifier = m.group(1)
file_identifier = m.group(2)
else:
logging.debug("Defaulting to the current project")
project_identifier = dxpy.WORKSPACE_ID
file_identifier = identifier
project = resolve_project(project_identifier)
logging.debug("Got project %s" %(project.name))
logging.debug("Now looking for file %s" %(file_identifier))
m = re.match(r'''(^[\w\-\ /\.]+)/([\w\-\ \.]+)''', file_identifier)
if m:
folder_name = m.group(1)
if not folder_name.startswith('/'):
folder_name = '/' + folder_name
file_name = m.group(2)
else:
folder_name = '/'
file_name = file_identifier
logging.debug("Looking for file %s in folder %s" %(file_name, folder_name))
try:
file_handler = dxpy.find_one_data_object(name=file_name, folder=folder_name, project=project.get_id(),
more_ok=False, zero_ok=False, return_handler=True)
except:
logging.debug('%s not found in project %s folder %s' %(file_name, project.get_id(), folder_name))
try:
file_handler = dxpy.DXFile(dxid=identifier, mode='r')
except:
logging.debug('%s not found as a dxid' %(identifier))
try:
file_handler = resolve_accession(identifier)
except:
logging.debug('%s not found as an accession' %(identifier))
logging.warning('Could not find file %s.' %(identifier))
return None
logging.info("Resolved file identifier %s to %s" %(identifier, file_handler.get_id()))
return file_handler
def resolve_accession(accession):
logging.debug("Looking for accession %s" %(accession))
if not re.match(r'''^ENCFF\d{3}[A-Z]{3}''', accession):
logging.debug("%s is not a valid accession format" %(accession))
raise ValueError(accession)
DNANEXUS_ENCODE_SNAPSHOT = 'ENCODE-SDSC-snapshot-20140505'
logging.debug('Testing')
try:
snapshot_project
except:
logging.debug('Looking for snapshot project %s' %(DNANEXUS_ENCODE_SNAPSHOT))
try:
project_handler = resolve_project(DNANEXUS_ENCODE_SNAPSHOT)
global snapshot_project
snapshot_project = project_handler
except:
logging.error("Cannot find snapshot project %s" %(DNANEXUS_ENCODE_SNAPSHOT))
raise ValueError(DNANEXUS_ENCODE_SNAPSHOT)
logging.debug('Found snapshot project %s' %(snapshot_project.name))
try:
accession_search = accession + '*'
logging.debug('Looking recursively for %s in %s' %(accession_search, snapshot_project.name))
file_handler = dxpy.find_one_data_object(
name=accession_search, name_mode='glob', more_ok=False, classname='file', recurse=True, return_handler=True,
folder='/', project=snapshot_project.get_id())
logging.debug('Got file handler for %s' %(file_handler.name))
return file_handler
except:
logging.error("Cannot find accession %s in project %s" %(accession, snapshot_project.name))
raise ValueError(accession)
def find_applet_by_name(applet_name, applets_project_id):
'''Looks up an applet by name in the project that holds tools. From Joe Dale's code.'''
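    # The '*' prefix on the log line below marks an applet resolved from the
    # APPLETS cache; it is cleared when a fresh lookup is performed.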
cached = '*'
if (applet_name, applets_project_id) not in APPLETS:
found = dxpy.find_one_data_object(classname="applet", name=applet_name,
project=applets_project_id,
zero_ok=False, more_ok=False, return_handler=True)
APPLETS[(applet_name, applets_project_id)] = found
cached = ''
logging.info(cached + "Resolved applet %s to %s" %(applet_name, APPLETS[(applet_name, applets_project_id)].get_id()))
return APPLETS[(applet_name, applets_project_id)]
def main():
args = get_args()
output_project = resolve_project(args.outp, 'w')
logging.info('Found output project %s' %(output_project.name))
output_folder = resolve_folder(output_project, args.outf)
logging.info('Using output folder %s' %(output_folder))
applet_project = resolve_project(args.applets, 'r')
logging.info('Found applet project %s' %(applet_project.name))
workflow = dxpy.new_dxworkflow(
title=WF_TITLE,
name=args.name,
description=WF_DESCRIPTION,
project=output_project.get_id(),
folder=output_folder)
blank_workflow = not (args.rep1 or args.rep2 or args.ctl1 or args.ctl2)
if not args.nomap:
#this whole strategy is fragile and unsatisfying
#subsequent code assumes reps come before contols
#a "superstage" is just a dict with a name, name(s) of input files, and then names and id's of stages that process that input
#each superstage here could be implemented as a stage in a more abstract workflow. That stage would then call the various applets that are separate
#stages here.
mapping_superstages = [
{'name': 'Rep1', 'input_args': args.rep1},
{'name': 'Rep2', 'input_args': args.rep2},
{'name': 'Ctl1', 'input_args': args.ctl1},
{'name': 'Ctl2', 'input_args': args.ctl2}
# {'name': 'Pooled Reps', 'input_args': (args.rep1 and args.rep2)},
# {'name': 'Pooled Controls', 'input_args': (args.ctl1 and args.ctl2)} ##idea is to create a "stub" stage and then populate it's input with the output of the pool stage, defined below
]
mapping_applet = find_applet_by_name(MAPPING_APPLET_NAME, applet_project.get_id())
mapping_output_folder = resolve_folder(output_project, output_folder + '/' + mapping_applet.name)
reference_tar = resolve_file(args.reference)
filter_qc_applet = find_applet_by_name(FILTER_QC_APPLET_NAME, applet_project.get_id())
filter_qc_output_folder = mapping_output_folder
xcor_applet = find_applet_by_name(XCOR_APPLET_NAME, applet_project.get_id())
xcor_output_folder = mapping_output_folder
for mapping_superstage in mapping_superstages:
superstage_name = mapping_superstage.get('name')
if mapping_superstage.get('input_args') or blank_workflow:
if blank_workflow:
mapping_stage_input = None
else:
mapping_stage_input = {'reference_tar' : dxpy.dxlink(reference_tar.get_id())}
for arg_index,input_arg in enumerate(mapping_superstage['input_args']): #read pairs assumed be in order read1,read2
reads = dxpy.dxlink(resolve_file(input_arg).get_id())
mapping_stage_input.update({'reads%d' %(arg_index+1): reads})
mapped_stage_id = workflow.add_stage(
mapping_applet,
name='Map %s' %(superstage_name),
folder=mapping_output_folder,
stage_input=mapping_stage_input
)
mapping_superstage.update({'map_stage_id': mapped_stage_id})
filter_qc_stage_id = workflow.add_stage(
filter_qc_applet,
name='Filter_QC %s' %(superstage_name),
folder=filter_qc_output_folder,
stage_input={
'input_bam': dxpy.dxlink({'stage': mapped_stage_id, 'outputField': 'mapped_reads'}),
'paired_end': dxpy.dxlink({'stage': mapped_stage_id, 'outputField': 'paired_end'})
}
)
mapping_superstage.update({'filter_qc_stage_id': filter_qc_stage_id})
xcor_stage_id = workflow.add_stage(
xcor_applet,
name='Xcor %s' %(superstage_name),
folder=xcor_output_folder,
stage_input={
'input_bam': dxpy.dxlink({'stage': filter_qc_stage_id, 'outputField': 'filtered_bam'}),
'paired_end': dxpy.dxlink({'stage': filter_qc_stage_id, 'outputField': 'paired_end'})
}
)
mapping_superstage.update({'xcor_stage_id': xcor_stage_id})
exp_rep1_ta = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep1'),
'outputField': 'tagAlign_file'})
exp_rep1_cc = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep1'),
'outputField': 'CC_scores_file'})
exp_rep2_ta = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep2'),
'outputField': 'tagAlign_file'})
exp_rep2_cc = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep2'),
'outputField': 'CC_scores_file'})
ctl_rep1_ta = dxpy.dxlink(
{'stage' : next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Ctl1'),
'outputField': 'tagAlign_file'})
ctl_rep2_ta = dxpy.dxlink(
{'stage' : next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Ctl2'),
'outputField': 'tagAlign_file'})
rep1_paired_end = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep1'),
'outputField': 'paired_end'})
rep2_paired_end = dxpy.dxlink(
{'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep2'),
'outputField': 'paired_end'})
else: #skipped the mapping, so just bring in the inputs from arguments
exp_rep1_ta = dxpy.dxlink(resolve_file(args.rep1[0]).get_id())
exp_rep2_ta = dxpy.dxlink(resolve_file(args.rep2[0]).get_id())
ctl_rep1_ta = dxpy.dxlink(resolve_file(args.ctl1[0]).get_id())
ctl_rep2_ta = dxpy.dxlink(resolve_file(args.ctl2[0]).get_id())
rep1_paired_end = args.rep1pe
rep2_paired_end = args.rep2pe
#here we need to calculate the cc scores files, because we're only being supplied tagAligns
#if we had mapped everything above we'd already have a handle to the cc file
xcor_only_applet = find_applet_by_name(XCOR_ONLY_APPLET_NAME, applet_project.get_id())
xcor_output_folder = resolve_folder(output_project, output_folder + '/' + xcor_only_applet.name)
xcor_only_stages = []
exp_rep1_cc_stage_id = workflow.add_stage(
xcor_only_applet,
name="Rep1 cross-correlation",
folder=xcor_output_folder,
stage_input={
'input_tagAlign': exp_rep1_ta,
'paired_end': rep1_paired_end
}
)
xcor_only_stages.append({'xcor_only_rep1_id': exp_rep1_cc_stage_id})
exp_rep1_cc = dxpy.dxlink(
{'stage': exp_rep1_cc_stage_id,
'outputField': 'CC_scores_file'})
exp_rep2_cc_stage_id = workflow.add_stage(
xcor_only_applet,
name="Rep2 cross-correlation",
folder=xcor_output_folder,
stage_input={
'input_tagAlign': exp_rep2_ta,
'paired_end': rep2_paired_end
}
)
xcor_only_stages.append({'xcor_only_rep2_id': exp_rep2_cc_stage_id})
exp_rep2_cc = dxpy.dxlink(
{'stage': exp_rep2_cc_stage_id,
'outputField': 'CC_scores_file'})
if not args.idronly:
spp_applet = find_applet_by_name(SPP_APPLET_NAME, applet_project.get_id())
peaks_output_folder = resolve_folder(output_project, output_folder + '/' + spp_applet.name)
spp_stages = []
if (args.rep1 and args.ctl1) or blank_workflow:
rep1_spp_stage_id = workflow.add_stage(
spp_applet,
name='Peaks Rep1',
folder=peaks_output_folder,
stage_input={
'experiment': exp_rep1_ta,
'control': ctl_rep1_ta,
'xcor_scores_input': exp_rep1_cc
}
)
spp_stages.append({'name': 'Peaks Rep1', 'stage_id': rep1_spp_stage_id})
if (args.rep2 and args.ctl2) or blank_workflow:
rep2_spp_stage_id = workflow.add_stage(
spp_applet,
name='Peaks Rep2',
folder=peaks_output_folder,
stage_input={
'experiment': exp_rep2_ta,
'control': ctl_rep2_ta,
'xcor_scores_input': exp_rep2_cc
}
)
spp_stages.append({'name': 'Peaks Rep2', 'stage_id': rep2_spp_stage_id})
if args.idr or args.idronly:
encode_spp_applet = find_applet_by_name(ENCODE_SPP_APPLET_NAME, applet_project.get_id())
encode_spp_stages = []
idr_peaks_output_folder = resolve_folder(output_project, output_folder + '/' + encode_spp_applet.name)
if (args.rep1 and args.ctl1 and args.rep2 and args.ctl2) or blank_workflow:
encode_spp_stage_id = workflow.add_stage(
encode_spp_applet,
name='Peaks for IDR',
folder=idr_peaks_output_folder,
stage_input={
'rep1_ta' : exp_rep1_ta,
'rep2_ta' : exp_rep2_ta,
'ctl1_ta': ctl_rep1_ta,
'ctl2_ta' : ctl_rep2_ta,
'rep1_xcor' : exp_rep1_cc,
'rep2_xcor' : exp_rep2_cc,
'rep1_paired_end': rep1_paired_end,
'rep2_paired_end': rep2_paired_end
}
)
encode_spp_stages.append({'name': 'Peaks for IDR', 'stage_id': encode_spp_stage_id})
idr_applet = find_applet_by_name(IDR_APPLET_NAME, applet_project.get_id())
encode_idr_applet = find_applet_by_name(ENCODE_IDR_APPLET_NAME, applet_project.get_id())
idr_stages = []
idr_output_folder = resolve_folder(output_project, output_folder + '/' + idr_applet.name)
if (args.rep1 and args.ctl1 and args.rep2 and args.ctl2) or blank_workflow or args.idronly:
idr_stage_id = workflow.add_stage(
idr_applet,
name='IDR True Replicates',
folder=idr_output_folder,
stage_input={
'rep1_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
'outputField': 'rep1_peaks'}),
'rep2_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
'outputField': 'rep2_peaks'}),
'pooled_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
'outputField': 'pooled_peaks'})
}
)
idr_stages.append({'name': 'IDR True Replicates', 'stage_id': idr_stage_id})
idr_stage_id = workflow.add_stage(
idr_applet,
name='IDR Rep 1 Self-pseudoreplicates',
folder=idr_output_folder,
stage_input={
'rep1_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
'outputField': 'rep1pr1_peaks'}),
'rep2_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
'outputField': 'rep1pr2_peaks'}),
'pooled_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
'outputField': 'rep1_peaks'})
}
)
idr_stages.append({'name': 'IDR Rep 1 Self-pseudoreplicates', 'stage_id': idr_stage_id})
idr_stage_id = workflow.add_stage(
idr_applet,
name='IDR Rep 2 Self-pseudoreplicates',
folder=idr_output_folder,
stage_input={
'rep1_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
'outputField': 'rep2pr1_peaks'}),
'rep2_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
'outputField': 'rep2pr2_peaks'}),
'pooled_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
'outputField': 'rep2_peaks'})
}
)
idr_stages.append({'name': 'IDR Rep 2 Self-pseudoreplicates', 'stage_id': idr_stage_id})
idr_stage_id = workflow.add_stage(
idr_applet,
            name='IDR Pooled Pseudoreplicates',
folder=idr_output_folder,
stage_input={
'rep1_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
'outputField': 'pooledpr1_peaks'}),
'rep2_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
'outputField': 'pooledpr2_peaks'}),
'pooled_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
'outputField': 'pooled_peaks'})
}
)
idr_stages.append({'name': 'IDR Pooled Pseudoreplicates', 'stage_id': idr_stage_id})
blacklist = resolve_file(args.blacklist)
idr_stage_id = workflow.add_stage(
encode_idr_applet,
name='Final IDR peak calls',
folder=idr_output_folder,
stage_input={
'reps_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in idr_stages if ss['name'] == 'IDR True Replicates'),
'outputField': 'IDR_peaks'}),
'r1pr_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in idr_stages if ss['name'] == 'IDR Rep 1 Self-pseudoreplicates'),
'outputField': 'IDR_peaks'}),
'r2pr_peaks' : dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in idr_stages if ss['name'] == 'IDR Rep 2 Self-pseudoreplicates'),
'outputField': 'IDR_peaks'}),
'pooledpr_peaks': dxpy.dxlink(
{'stage': next(ss.get('stage_id') for ss in idr_stages if ss['name'] == 'IDR Pooled Pseudoreplicates'),
'outputField': 'IDR_peaks'}),
'blacklist': dxpy.dxlink(blacklist.get_id())
}
)
idr_stages.append({'name': 'Final IDR peak calls', 'stage_id': idr_stage_id})
if not (args.nomap):
logging.debug("Mapping stages: %s" %(mapping_superstages))
else:
logging.debug("xcor only stages: %s" %(xcor_only_stages))
if not args.idronly:
logging.debug("Peak stages: %s" %(spp_stages))
if args.idr or args.idronly:
logging.debug("Peaks for IDR stages: %s" %(encode_spp_stages))
logging.debug("IDR stages: %s" %(idr_stages))
if args.yes:
job_id = workflow.run({}, delay_workspace_destruction=True)
logging.info("Running as job %s" %(job_id))
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
0d5a4132c1a3c779a764137edb3a3e33431d8662 | fa89836a6759151896a07650747462b8cda40610 | /mse/about/migrations/0010_event_ordinal.py | 60b846d5a892fc107b9a4bef92acd71c0bed9132 | []
| no_license | DigitalGizmo/mse21 | 334813bfebec9b78f0541744e54f218f9cc6936b | 89f1c0f9c05cefaaa8c703732ee4e4642aecd3c9 | refs/heads/master | 2023-07-09T13:29:13.903900 | 2018-03-26T19:26:09 | 2018-03-26T19:26:09 | 126,878,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('about', '0009_single'),
]
operations = [
migrations.AddField(
model_name='event',
name='ordinal',
field=models.IntegerField(verbose_name='Order in Menu', default=99),
),
]
| [
"[email protected]"
]
| |
e3205ca78ec9c5c4154d6e2bc096e8713b5deffc | 78883afed6f95bc0aae9f48e9d20a4a7c77adb32 | /plugins/secpicam480.py | d9c6855043be61e4c9b27797e8255abed9640c19 | []
| no_license | xe1gyq/speed-camera | f7da04162afaece15033971e23692f5f24a715ed | 71306c058235bf1a7fb00c484c9d34f4ac0fefae | refs/heads/master | 2021-03-30T21:18:50.236194 | 2018-02-26T20:07:13 | 2018-02-26T20:07:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,341 | py | # ---------------- User Configuration Settings for speed-cam.py ---------------------------------
# Ver 8.4 speed-cam.py picam480 Stream Variable Configuration Settings
#######################################
# speed-cam.py plugin settings
#######################################
# Calibration Settings
# --------------------
calibrate = False # Create a calibration image file with calibration hash markers 10 px per mark
# Crop Area for motion detection Tracking
# ---------------------------------------
x_left = 150 # Default= 150 Exclude event if x less than this px position
x_right = 490 # Default= 490 Exclude event if x greater than this px position
y_upper = 140 # Default= 140 Exclude event if y less that this value
y_lower = 340 # Default= 340 Exclude event if y greater than this value
# Motion Event Settings
# ---------------------
SPEED_MPH = False # Set the speed conversion kph=False mph=True
MIN_AREA = 200 # Default= 200 Exclude all contours less than or equal to this sq-px Area
track_len_trig = 75 # Default= 75 Length of track to trigger speed photo
x_diff_max = 18 # Default= 18 Exclude if max px away >= last motion event x pos
x_diff_min = 1 # Default= 1 Exclude if min px away <= last event x pos
track_timeout = 0.0 # Default= 0.0 Optional seconds to wait after track End (Avoid dual tracking)
event_timeout = 0.3 # Default= 0.3 seconds to wait for next motion event before starting new track
log_data_to_CSV = False # Default= False True = Save log data as CSV comma separated values
# Camera Settings
# ---------------
WEBCAM = False # Default= False False=PiCamera True=USB WebCamera
# Pi Camera Settings
# ------------------
CAMERA_WIDTH = 640 # Default= 640 Image stream width for opencv motion scanning default=320
CAMERA_HEIGHT = 480 # Default= 480 Image stream height for opencv motion scanning default=240
CAMERA_FRAMERATE = 20 # Default = 30 Frame rate for video stream V2 picam can be higher
# Camera Image Settings
# ---------------------
image_path = "media/security" # folder name to store images
image_prefix = "scam-" # image name prefix security camera
image_show_motion_area = False # True= Display motion detection rectangle area on saved images
image_filename_speed = False # True= Prefix filename with speed value
image_text_on = False # True= Show Text on speed images False= No Text on images
image_bigger = 1.5 # Default= 1.5 Resize saved speed image by value
image_font_size = 18 # Default= 18 Font text height in px for text on images
imageRecentMax = 10 # 0=off Maintain specified number of most recent files in motionRecentDir
imageRecentDir = "media/recent/security" # default= "media/recent" save recent files directory path
# Optional Manage SubDir Creation by time, number of files or both
# ----------------------------------------------------------------
imageSubDirMaxHours = 0 # 0=off or specify MaxHours - Creates New dated sub-folder if MaxHours exceeded
imageSubDirMaxFiles = 0 # 0=off or specify MaxFiles - Creates New dated sub-folder if MaxFiles exceeded
# ---------------------------------------------- End of User Variables -----------------------------------------------------
| [
"[email protected]"
]
| |
f94f70300297d6540a203b03e0a808f40fb78e99 | 3cedc7c1519d3b013aad9ec4e6a6ee7834da7589 | /selenium_code/z_practise/001/sa2.py | f978ca4ea1b40e7eda5051133e473ae0a9999596 | []
| no_license | hzrg/songqin_course | 53437100669ee93d2ac5ecae5de938b1a4007d7f | 05e422ce34a42fd6d3819722a19252f8005e79ed | refs/heads/master | 2022-02-09T13:27:59.871400 | 2019-06-13T06:08:45 | 2019-06-13T06:08:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | # coding:utf8
from selenium import webdriver
# path to the browser driver executable
executable_path = r"d:\tools\webdrivers\chromedriver.exe"
# specify that this is the Chrome driver;
# when this line runs, Selenium launches the chromedriver program found at the given path
driver = webdriver.Chrome(executable_path)
# ------------------------
driver.get('http://www.weather.com.cn/html/province/jiangsu.shtml')
# inspecting the HTML shows the temperature info sits in the dl children of the forecastID element
info = driver.find_element_by_id("forecastID")
# then grab all dl child elements of the forecastID element
dls = info.find_elements_by_tag_name('dl')
# collect each city name and temperature into the citys list
citys = []
for dl in dls:
# print dl.get_attribute('innerHTML')
name = dl.find_element_by_tag_name('dt').text
    # the positions of the high/low temperatures can change; depending on position the value is in a span or a b tag
ltemp = dl.find_element_by_tag_name('b').text
ltemp = int(ltemp.replace(u'℃',''))
print(name, ltemp)
citys.append([name, ltemp])
lowest = 100
lowestCitys = []  # list of the cities with the lowest temperature
for one in citys:
curcity = one[0]
ltemp = one[1]
curlowweather = ltemp
    # found a city with a lower temperature
if curlowweather<lowest:
lowest = curlowweather
lowestCitys = [curcity]
    # temperature equals the current lowest, add the city to the list
elif curlowweather ==lowest:
lowestCitys.append(curcity)
print('Lowest temperature is %s, in cities: %s' % (lowest, ','.join(lowestCitys)))
# ------------------------
driver.quit() | [
"[email protected]"
]
| |
eb1d56fce359772a0815850648aed190af310eb2 | 7c61922c2de52ea684a39a002355eff6551bf930 | /getcount.py | 33de2b911f0d1b8b0652e5e8f9650e44c86dcae2 | []
| no_license | DongDong-123/codewars | ac3e6b5d5dab78ef60140ac87b9c02cc8dba646c | 723750fed649ea763a2363604dd6dea3359216a8 | refs/heads/master | 2020-03-21T15:48:19.316417 | 2019-01-04T14:44:54 | 2019-01-04T14:44:54 | 138,733,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | def getCount(inputStr):
    # Count the vowels in inputStr.
    inputStr = inputStr.lower()
    vowels = ['a', 'e', 'i', 'o', 'u']
    num_vowels = 0
    for ch in inputStr:
        if ch in vowels:
            num_vowels += 1
    return num_vowels
#inputStr = "abracadabra"
a = getCount("abracadabra")
print(a)
| [
"[email protected]"
]
| |
cde20cb3464818d38c4f964502b21319c010bad4 | 2c872fedcdc12c89742d10c2f1c821eed0470726 | /pbase/day09/code/text_args.py | bf20be1ef127a5651f828ffc51ef6a1001634dcd | []
| no_license | zuigehulu/AID1811 | 581c3c7a37df9fa928bc632e4891fc9bafe69201 | 10cab0869875290646a9e5d815ff159d0116990e | refs/heads/master | 2020-04-19T16:33:04.174841 | 2019-01-30T07:58:24 | 2019-01-30T07:58:24 | 168,307,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | def mysum(*args):
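    # *args packs every positional argument into a tuple, so sum() accepts any
    # number of values.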
return sum(args)
print(mysum(1,2))
print(mysum(1,2,3,4))
print(mysum(1,2,3,4,5,6,7,8))
| [
"[email protected]"
]
| |
fa79b596babef682f3b5914ffcc30d799205917c | 726ce8dddbb12af1662e002633bfe538ddf77708 | /PyOpenGL-2.0.2.01-py2.5-win32.egg/OpenGL/GL/SGIS/_multitexture.py | f56fd6209fbb3f8208e4edbfaed99bcb96da0c30 | []
| no_license | bopopescu/BCPy2000-1 | f9264bb020ba734be0bcc8e8173d2746b0f17eeb | 0f877075a846d17e7593222628e9fe49ab863039 | refs/heads/master | 2022-11-26T07:58:03.493727 | 2019-06-02T20:25:58 | 2019-06-02T20:25:58 | 282,195,357 | 0 | 0 | null | 2020-07-24T10:52:24 | 2020-07-24T10:52:24 | null | UTF-8 | Python | false | false | 284 | py | def __bootstrap__():
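    # Standard setuptools egg bootstrap stub: locate the compiled
    # _multitexture.pyd inside the egg and load it in place of this module.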
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__,'_multitexture.pyd')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| [
"[email protected]"
]
| |
d6d43f7b6a42de881d32d0fdd6f7c873b79ac260 | 5ba006696ba7f79e99c1b08b2c540b4465f22f53 | /aiortc/rtcpeerconnection.py | 345ee3246f1a1b0dfa02b5c64d5897e6b46c494e | [
"BSD-3-Clause"
]
| permissive | shaunastarabadi/aiortc | 8305720b3972a142184efb69a7add827161598bb | b19674e4776bced784aab497db2a81af269e9024 | refs/heads/master | 2020-06-11T15:21:42.638437 | 2019-06-27T04:18:27 | 2019-06-27T04:18:27 | 194,009,757 | 0 | 0 | BSD-3-Clause | 2019-06-27T02:35:55 | 2019-06-27T02:35:55 | null | UTF-8 | Python | false | false | 39,141 | py | import asyncio
import copy
import uuid
from collections import OrderedDict
from pyee import EventEmitter
from . import clock, rtp, sdp
from .codecs import CODECS, HEADER_EXTENSIONS, is_rtx
from .events import RTCTrackEvent
from .exceptions import InternalError, InvalidAccessError, InvalidStateError
from .rtcconfiguration import RTCConfiguration
from .rtcdatachannel import RTCDataChannel, RTCDataChannelParameters
from .rtcdtlstransport import RTCCertificate, RTCDtlsTransport
from .rtcicetransport import RTCIceGatherer, RTCIceTransport
from .rtcrtpparameters import (
RTCRtpDecodingParameters,
RTCRtpParameters,
RTCRtpReceiveParameters,
RTCRtpRtxParameters,
)
from .rtcrtpreceiver import RemoteStreamTrack, RTCRtpReceiver
from .rtcrtpsender import RTCRtpSender
from .rtcrtptransceiver import RTCRtpTransceiver
from .rtcsctptransport import RTCSctpTransport
from .rtcsessiondescription import RTCSessionDescription
from .stats import RTCStatsReport
DISCARD_HOST = "0.0.0.0"
DISCARD_PORT = 9
MEDIA_KINDS = ["audio", "video"]
def filter_preferred_codecs(codecs, preferred):
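    # Keep only the codecs matching the caller's preferences, in preference
    # order, pulling in the matching RTX codec for each kept payload type when
    # RTX is itself among the preferences.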
if not preferred:
return codecs
rtx_codecs = list(filter(is_rtx, codecs))
rtx_enabled = next(filter(is_rtx, preferred), None) is not None
filtered = []
for pref in filter(lambda x: not is_rtx(x), preferred):
for codec in codecs:
if (
codec.mimeType.lower() == pref.mimeType.lower()
and codec.parameters == pref.parameters
):
filtered.append(codec)
# add corresponding RTX
if rtx_enabled:
for rtx in rtx_codecs:
if rtx.parameters["apt"] == codec.payloadType:
filtered.append(rtx)
break
break
return filtered
def find_common_codecs(local_codecs, remote_codecs):
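    # Intersect local and remote codec lists: MIME type and clock rate must
    # match, dynamic payload types follow the remote side, and RTX entries are
    # only kept when their base codec (the "apt" parameter) was accepted.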
common = []
common_base = {}
for c in remote_codecs:
# for RTX, check we accepted the base codec
if is_rtx(c):
if c.parameters.get("apt") in common_base:
base = common_base[c.parameters["apt"]]
if c.clockRate == base.clockRate:
common.append(copy.deepcopy(c))
continue
# handle other codecs
for codec in local_codecs:
if (
codec.mimeType.lower() == c.mimeType.lower()
and codec.clockRate == c.clockRate
):
if codec.mimeType.lower() == "video/h264":
# FIXME: check according to RFC 6184
parameters_compatible = True
for param in ["packetization-mode", "profile-level-id"]:
if c.parameters.get(param) != codec.parameters.get(param):
parameters_compatible = False
if not parameters_compatible:
continue
codec = copy.deepcopy(codec)
if c.payloadType in rtp.DYNAMIC_PAYLOAD_TYPES:
codec.payloadType = c.payloadType
codec.rtcpFeedback = list(
filter(lambda x: x in c.rtcpFeedback, codec.rtcpFeedback)
)
common.append(codec)
common_base[codec.payloadType] = codec
break
return common
def find_common_header_extensions(local_extensions, remote_extensions):
common = []
for rx in remote_extensions:
for lx in local_extensions:
if lx.uri == rx.uri:
common.append(rx)
return common
def add_transport_description(media, dtlsTransport):
# ice
iceTransport = dtlsTransport.transport
iceGatherer = iceTransport.iceGatherer
media.ice_candidates = iceGatherer.getLocalCandidates()
media.ice_candidates_complete = iceGatherer.state == "completed"
media.ice = iceGatherer.getLocalParameters()
if media.ice_candidates:
media.host = media.ice_candidates[0].ip
media.port = media.ice_candidates[0].port
else:
media.host = DISCARD_HOST
media.port = DISCARD_PORT
# dtls
media.dtls = dtlsTransport.getLocalParameters()
if iceTransport.role == "controlling":
media.dtls.role = "auto"
else:
media.dtls.role = "client"
def add_remote_candidates(iceTransport, media):
for candidate in media.ice_candidates:
iceTransport.addRemoteCandidate(candidate)
if media.ice_candidates_complete:
iceTransport.addRemoteCandidate(None)
def allocate_mid(mids):
"""
Allocate a MID which has not been used yet.
"""
i = 0
while True:
mid = str(i)
if mid not in mids:
mids.add(mid)
return mid
i += 1
def create_media_description_for_sctp(sctp, legacy, mid):
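    # "legacy" selects the older DTLS/SCTP SDP syntax (a=sctpmap) rather than
    # the newer UDP/DTLS/SCTP form that uses a=sctp-port.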
if legacy:
media = sdp.MediaDescription(
kind="application", port=DISCARD_PORT, profile="DTLS/SCTP", fmt=[sctp.port]
)
media.sctpmap[sctp.port] = (
"webrtc-datachannel %d" % sctp._outbound_streams_count
)
else:
media = sdp.MediaDescription(
kind="application",
port=DISCARD_PORT,
profile="UDP/DTLS/SCTP",
fmt=["webrtc-datachannel"],
)
media.sctp_port = sctp.port
media.rtp.muxId = mid
media.sctpCapabilities = sctp.getCapabilities()
add_transport_description(media, sctp.transport)
return media
def create_media_description_for_transceiver(transceiver, cname, direction, mid):
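    # Build one audio/video m= section: negotiated codecs and header extensions,
    # the sender's SSRC (plus an FID-grouped RTX SSRC when RTX is negotiated),
    # rtcp-mux, and the ICE/DTLS transport attributes.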
media = sdp.MediaDescription(
kind=transceiver.kind,
port=DISCARD_PORT,
profile="UDP/TLS/RTP/SAVPF",
fmt=[c.payloadType for c in transceiver._codecs],
)
media.direction = direction
media.msid = "%s %s" % (transceiver.sender._stream_id, transceiver.sender._track_id)
media.rtp = RTCRtpParameters(
codecs=transceiver._codecs,
headerExtensions=transceiver._headerExtensions,
muxId=mid,
)
media.rtcp_host = DISCARD_HOST
media.rtcp_port = DISCARD_PORT
media.rtcp_mux = True
media.ssrc = [sdp.SsrcDescription(ssrc=transceiver.sender._ssrc, cname=cname)]
# if RTX is enabled, add corresponding SSRC
if next(filter(is_rtx, media.rtp.codecs), None):
media.ssrc.append(
sdp.SsrcDescription(ssrc=transceiver.sender._rtx_ssrc, cname=cname)
)
media.ssrc_group = [
sdp.GroupDescription(
semantic="FID",
items=[transceiver.sender._ssrc, transceiver.sender._rtx_ssrc],
)
]
add_transport_description(media, transceiver._transport)
return media
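# sdp.DIRECTIONS is ordered so that a direction's index doubles as a two-bit
# send/recv mask, which lets direction combination reduce to bitwise AND/OR on
# the indexes.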
def and_direction(a, b):
return sdp.DIRECTIONS[sdp.DIRECTIONS.index(a) & sdp.DIRECTIONS.index(b)]
def or_direction(a, b):
return sdp.DIRECTIONS[sdp.DIRECTIONS.index(a) | sdp.DIRECTIONS.index(b)]
def reverse_direction(direction):
if direction == "sendonly":
return "recvonly"
elif direction == "recvonly":
return "sendonly"
return direction
def wrap_session_description(session_description: sdp.SessionDescription):
if session_description is not None:
return RTCSessionDescription(
sdp=str(session_description), type=session_description.type
)
class RTCPeerConnection(EventEmitter):
"""
The :class:`RTCPeerConnection` interface represents a WebRTC connection
between the local computer and a remote peer.
:param: configuration: An optional :class:`RTCConfiguration`.
"""
def __init__(self, configuration=None):
super().__init__()
self.__certificates = [RTCCertificate.generateCertificate()]
self.__cname = "{%s}" % uuid.uuid4()
self.__configuration = configuration or RTCConfiguration()
self.__iceTransports = set()
self.__initialOfferer = None
self.__remoteDtls = {}
self.__remoteIce = {}
self.__seenMids = set()
self.__sctp = None
self.__sctp_mline_index = None
self._sctpLegacySdp = True
self.__sctpRemotePort = None
self.__sctpRemoteCaps = None
self.__stream_id = str(uuid.uuid4())
self.__transceivers = []
self.__iceConnectionState = "new"
self.__iceGatheringState = "new"
self.__isClosed = False
self.__signalingState = "stable"
self.__currentLocalDescription = None # type: sdp.SessionDescription
self.__currentRemoteDescription = None # type: sdp.SessionDescription
self.__pendingLocalDescription = None # type: sdp.SessionDescription
self.__pendingRemoteDescription = None # type: sdp.SessionDescription
@property
def iceConnectionState(self):
return self.__iceConnectionState
@property
def iceGatheringState(self):
return self.__iceGatheringState
@property
def localDescription(self):
"""
An :class:`RTCSessionDescription` describing the session for
the local end of the connection.
"""
return wrap_session_description(self.__localDescription())
@property
def remoteDescription(self):
"""
An :class:`RTCSessionDescription` describing the session for
the remote end of the connection.
"""
return wrap_session_description(self.__remoteDescription())
@property
def sctp(self):
"""
An :class:`RTCSctpTransport` describing the SCTP transport being used
for datachannels or `None`.
"""
return self.__sctp
@property
def signalingState(self):
return self.__signalingState
def addIceCandidate(self, candidate):
"""
Add a new :class:`RTCIceCandidate` received from the remote peer.
The specified candidate must have a value for either `sdpMid` or `sdpMLineIndex`.
"""
if candidate.sdpMid is None and candidate.sdpMLineIndex is None:
raise ValueError("Candidate must have either sdpMid or sdpMLineIndex")
for transceiver in self.__transceivers:
if candidate.sdpMid == transceiver.mid and not transceiver._bundled:
iceTransport = transceiver._transport.transport
iceTransport.addRemoteCandidate(candidate)
return
if (
self.__sctp
and candidate.sdpMid == self.__sctp.mid
and not self.__sctp._bundled
):
iceTransport = self.__sctp.transport.transport
iceTransport.addRemoteCandidate(candidate)
def addTrack(self, track):
"""
Add a :class:`MediaStreamTrack` to the set of media tracks which
will be transmitted to the remote peer.
"""
# check state is valid
self.__assertNotClosed()
if track.kind not in ["audio", "video"]:
raise InternalError('Invalid track kind "%s"' % track.kind)
# don't add track twice
self.__assertTrackHasNoSender(track)
for transceiver in self.__transceivers:
if transceiver.kind == track.kind:
if transceiver.sender.track is None:
transceiver.sender.replaceTrack(track)
transceiver.direction = or_direction(
transceiver.direction, "sendonly"
)
return transceiver.sender
transceiver = self.__createTransceiver(
direction="sendrecv", kind=track.kind, sender_track=track
)
return transceiver.sender
def addTransceiver(self, trackOrKind, direction="sendrecv"):
"""
Add a new :class:`RTCRtpTransceiver`.
"""
self.__assertNotClosed()
# determine track or kind
if hasattr(trackOrKind, "kind"):
kind = trackOrKind.kind
track = trackOrKind
else:
kind = trackOrKind
track = None
if kind not in ["audio", "video"]:
raise InternalError('Invalid track kind "%s"' % kind)
# check direction
if direction not in sdp.DIRECTIONS:
raise InternalError('Invalid direction "%s"' % direction)
# don't add track twice
if track:
self.__assertTrackHasNoSender(track)
return self.__createTransceiver(
direction=direction, kind=kind, sender_track=track
)
async def close(self):
"""
Terminate the ICE agent, ending ICE processing and streams.
"""
if self.__isClosed:
return
self.__isClosed = True
self.__setSignalingState("closed")
# stop senders / receivers
for transceiver in self.__transceivers:
await transceiver.stop()
if self.__sctp:
await self.__sctp.stop()
# stop transports
for transceiver in self.__transceivers:
await transceiver._transport.stop()
await transceiver._transport.transport.stop()
if self.__sctp:
await self.__sctp.transport.stop()
await self.__sctp.transport.transport.stop()
self.__updateIceConnectionState()
# no more events will be emitted, so remove all event listeners
# to facilitate garbage collection.
self.remove_all_listeners()
async def createAnswer(self):
"""
Create an SDP answer to an offer received from a remote peer during
the offer/answer negotiation of a WebRTC connection.
:rtype: :class:`RTCSessionDescription`
"""
# check state is valid
self.__assertNotClosed()
if self.signalingState not in ["have-remote-offer", "have-local-pranswer"]:
raise InvalidStateError(
'Cannot create answer in signaling state "%s"' % self.signalingState
)
# create description
ntp_seconds = clock.current_ntp_time() >> 32
description = sdp.SessionDescription()
description.origin = "- %d %d IN IP4 0.0.0.0" % (ntp_seconds, ntp_seconds)
description.msid_semantic.append(
sdp.GroupDescription(semantic="WMS", items=["*"])
)
description.type = "answer"
for remote_m in self.__remoteDescription().media:
if remote_m.kind in ["audio", "video"]:
transceiver = self.__getTransceiverByMid(remote_m.rtp.muxId)
description.media.append(
create_media_description_for_transceiver(
transceiver,
cname=self.__cname,
direction=and_direction(
transceiver.direction, transceiver._offerDirection
),
mid=transceiver.mid,
)
)
else:
description.media.append(
create_media_description_for_sctp(
self.__sctp, legacy=self._sctpLegacySdp, mid=self.__sctp.mid
)
)
bundle = sdp.GroupDescription(semantic="BUNDLE", items=[])
for media in description.media:
bundle.items.append(media.rtp.muxId)
description.group.append(bundle)
return wrap_session_description(description)
def createDataChannel(
self,
label,
maxPacketLifeTime=None,
maxRetransmits=None,
ordered=True,
protocol="",
negotiated=False,
id=None,
):
"""
Create a data channel with the given label.
:rtype: :class:`RTCDataChannel`
"""
if maxPacketLifeTime is not None and maxRetransmits is not None:
raise ValueError("Cannot specify both maxPacketLifeTime and maxRetransmits")
if not self.__sctp:
self.__createSctpTransport()
parameters = RTCDataChannelParameters(
id=id,
label=label,
maxPacketLifeTime=maxPacketLifeTime,
maxRetransmits=maxRetransmits,
negotiated=negotiated,
ordered=ordered,
protocol=protocol,
)
return RTCDataChannel(self.__sctp, parameters)
async def createOffer(self):
"""
Create an SDP offer for the purpose of starting a new WebRTC
connection to a remote peer.
:rtype: :class:`RTCSessionDescription`
"""
# check state is valid
self.__assertNotClosed()
if not self.__sctp and not self.__transceivers:
raise InternalError(
"Cannot create an offer with no media and no data channels"
)
# offer codecs
for transceiver in self.__transceivers:
transceiver._codecs = filter_preferred_codecs(
CODECS[transceiver.kind][:], transceiver._preferred_codecs
)
transceiver._headerExtensions = HEADER_EXTENSIONS[transceiver.kind][:]
mids = self.__seenMids.copy()
# create description
ntp_seconds = clock.current_ntp_time() >> 32
description = sdp.SessionDescription()
description.origin = "- %d %d IN IP4 0.0.0.0" % (ntp_seconds, ntp_seconds)
description.msid_semantic.append(
sdp.GroupDescription(semantic="WMS", items=["*"])
)
description.type = "offer"
def get_media(description):
return description.media if description else []
def get_media_section(media, i):
return media[i] if i < len(media) else None
# handle existing transceivers / sctp
local_media = get_media(self.__localDescription())
remote_media = get_media(self.__remoteDescription())
for i in range(max(len(local_media), len(remote_media))):
local_m = get_media_section(local_media, i)
remote_m = get_media_section(remote_media, i)
media_kind = local_m.kind if local_m else remote_m.kind
mid = local_m.rtp.muxId if local_m else remote_m.rtp.muxId
if media_kind in ["audio", "video"]:
transceiver = self.__getTransceiverByMid(mid)
transceiver._set_mline_index(i)
description.media.append(
create_media_description_for_transceiver(
transceiver,
cname=self.__cname,
direction=transceiver.direction,
mid=mid,
)
)
elif media_kind == "application":
self.__sctp_mline_index = i
description.media.append(
create_media_description_for_sctp(
self.__sctp, legacy=self._sctpLegacySdp, mid=mid
)
)
# handle new transceivers / sctp
def next_mline_index():
return len(description.media)
for transceiver in filter(
lambda x: x.mid is None and not x.stopped, self.__transceivers
):
transceiver._set_mline_index(next_mline_index())
description.media.append(
create_media_description_for_transceiver(
transceiver,
cname=self.__cname,
direction=transceiver.direction,
mid=allocate_mid(mids),
)
)
if self.__sctp and self.__sctp.mid is None:
self.__sctp_mline_index = next_mline_index()
description.media.append(
create_media_description_for_sctp(
self.__sctp, legacy=self._sctpLegacySdp, mid=allocate_mid(mids)
)
)
bundle = sdp.GroupDescription(semantic="BUNDLE", items=[])
for media in description.media:
bundle.items.append(media.rtp.muxId)
description.group.append(bundle)
return wrap_session_description(description)
def getReceivers(self):
"""
Returns the list of :class:`RTCRtpReceiver` objects that are currently
attached to the connection.
"""
return list(map(lambda x: x.receiver, self.__transceivers))
def getSenders(self):
"""
Returns the list of :class:`RTCRtpSender` objects that are currently
attached to the connection.
"""
return list(map(lambda x: x.sender, self.__transceivers))
async def getStats(self):
"""
Returns statistics for the connection.
:rtype: :class:`RTCStatsReport`
"""
merged = RTCStatsReport()
coros = [x.getStats() for x in (self.getSenders() + self.getReceivers())]
for report in await asyncio.gather(*coros):
merged.update(report)
return merged
def getTransceivers(self):
"""
Returns the list of :class:`RTCRtpTransceiver` objects that are currently
attached to the connection.
"""
return list(self.__transceivers)
async def setLocalDescription(self, sessionDescription):
"""
Change the local description associated with the connection.
:param: sessionDescription: An :class:`RTCSessionDescription` generated
by :meth:`createOffer` or :meth:`createAnswer()`.
"""
# parse and validate description
description = sdp.SessionDescription.parse(sessionDescription.sdp)
description.type = sessionDescription.type
self.__validate_description(description, is_local=True)
# update signaling state
if description.type == "offer":
self.__setSignalingState("have-local-offer")
elif description.type == "answer":
self.__setSignalingState("stable")
# assign MID
for i, media in enumerate(description.media):
mid = media.rtp.muxId
self.__seenMids.add(mid)
if media.kind in ["audio", "video"]:
transceiver = self.__getTransceiverByMLineIndex(i)
transceiver._set_mid(mid)
elif media.kind == "application":
self.__sctp.mid = mid
# set ICE role
if self.__initialOfferer is None:
self.__initialOfferer = description.type == "offer"
for iceTransport in self.__iceTransports:
iceTransport._connection.ice_controlling = self.__initialOfferer
# configure direction
for t in self.__transceivers:
if description.type in ["answer", "pranswer"]:
t._currentDirection = and_direction(t.direction, t._offerDirection)
# gather candidates
await self.__gather()
for i, media in enumerate(description.media):
if media.kind in ["audio", "video"]:
transceiver = self.__getTransceiverByMLineIndex(i)
add_transport_description(media, transceiver._transport)
elif media.kind == "application":
add_transport_description(media, self.__sctp.transport)
# connect
asyncio.ensure_future(self.__connect())
# replace description
if description.type == "answer":
self.__currentLocalDescription = description
self.__pendingLocalDescription = None
else:
self.__pendingLocalDescription = description
async def setRemoteDescription(self, sessionDescription):
"""
Changes the remote description associated with the connection.
:param: sessionDescription: An :class:`RTCSessionDescription` created from
information received over the signaling channel.
"""
# parse and validate description
description = sdp.SessionDescription.parse(sessionDescription.sdp)
description.type = sessionDescription.type
self.__validate_description(description, is_local=False)
# apply description
trackEvents = []
for i, media in enumerate(description.media):
self.__seenMids.add(media.rtp.muxId)
if media.kind in ["audio", "video"]:
# find transceiver
transceiver = None
for t in self.__transceivers:
if t.kind == media.kind and t.mid in [None, media.rtp.muxId]:
transceiver = t
if transceiver is None:
transceiver = self.__createTransceiver(
direction="recvonly", kind=media.kind
)
if transceiver.mid is None:
transceiver._set_mid(media.rtp.muxId)
transceiver._set_mline_index(i)
# negotiate codecs
common = filter_preferred_codecs(
find_common_codecs(CODECS[media.kind], media.rtp.codecs),
transceiver._preferred_codecs,
)
assert len(common)
transceiver._codecs = common
transceiver._headerExtensions = find_common_header_extensions(
HEADER_EXTENSIONS[media.kind], media.rtp.headerExtensions
)
# configure transport
iceTransport = transceiver._transport.transport
add_remote_candidates(iceTransport, media)
self.__remoteDtls[transceiver] = media.dtls
self.__remoteIce[transceiver] = media.ice
# configure direction
direction = reverse_direction(media.direction)
if description.type in ["answer", "pranswer"]:
transceiver._currentDirection = direction
else:
transceiver._offerDirection = direction
# create remote stream track
if (
direction in ["recvonly", "sendrecv"]
and not transceiver.receiver._track
):
transceiver.receiver._track = RemoteStreamTrack(kind=media.kind)
trackEvents.append(
RTCTrackEvent(
receiver=transceiver.receiver,
track=transceiver.receiver._track,
transceiver=transceiver,
)
)
elif media.kind == "application":
if not self.__sctp:
self.__createSctpTransport()
if self.__sctp.mid is None:
self.__sctp.mid = media.rtp.muxId
self.__sctp_mline_index = i
# configure sctp
if media.profile == "DTLS/SCTP":
self._sctpLegacySdp = True
self.__sctpRemotePort = int(media.fmt[0])
else:
self._sctpLegacySdp = False
self.__sctpRemotePort = media.sctp_port
self.__sctpRemoteCaps = media.sctpCapabilities
# configure transport
iceTransport = self.__sctp.transport.transport
add_remote_candidates(iceTransport, media)
self.__remoteDtls[self.__sctp] = media.dtls
self.__remoteIce[self.__sctp] = media.ice
# remove bundled transports
bundle = next((x for x in description.group if x.semantic == "BUNDLE"), None)
if bundle and bundle.items:
# find main media stream
masterMid = bundle.items[0]
masterTransport = None
for transceiver in self.__transceivers:
if transceiver.mid == masterMid:
masterTransport = transceiver._transport
break
if self.__sctp and self.__sctp.mid == masterMid:
masterTransport = self.__sctp.transport
# replace transport for bundled media
oldTransports = set()
slaveMids = bundle.items[1:]
for transceiver in self.__transceivers:
if transceiver.mid in slaveMids and not transceiver._bundled:
oldTransports.add(transceiver._transport)
transceiver.receiver.setTransport(masterTransport)
transceiver.sender.setTransport(masterTransport)
transceiver._bundled = True
transceiver._transport = masterTransport
if self.__sctp and self.__sctp.mid in slaveMids:
oldTransports.add(self.__sctp.transport)
self.__sctp.setTransport(masterTransport)
self.__sctp._bundled = True
# stop and discard old ICE transports
for dtlsTransport in oldTransports:
await dtlsTransport.stop()
await dtlsTransport.transport.stop()
self.__iceTransports.discard(dtlsTransport.transport)
self.__updateIceGatheringState()
self.__updateIceConnectionState()
# FIXME: in aiortc 1.0.0 emit RTCTrackEvent directly
for event in trackEvents:
self.emit("track", event.track)
# connect
asyncio.ensure_future(self.__connect())
# update signaling state
if description.type == "offer":
self.__setSignalingState("have-remote-offer")
elif description.type == "answer":
self.__setSignalingState("stable")
# replace description
if description.type == "answer":
self.__currentRemoteDescription = description
self.__pendingRemoteDescription = None
else:
self.__pendingRemoteDescription = description
async def __connect(self):
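        # Start ICE and then DTLS on each transport that has local candidates
        # and remote parameters, then start the RTP senders/receivers and the
        # SCTP association once the corresponding DTLS transport is connected.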
for transceiver in self.__transceivers:
dtlsTransport = transceiver._transport
iceTransport = dtlsTransport.transport
if (
iceTransport.iceGatherer.getLocalCandidates()
and transceiver in self.__remoteIce
):
await iceTransport.start(self.__remoteIce[transceiver])
if dtlsTransport.state == "new":
await dtlsTransport.start(self.__remoteDtls[transceiver])
if dtlsTransport.state == "connected":
if transceiver.currentDirection in ["sendonly", "sendrecv"]:
await transceiver.sender.send(self.__localRtp(transceiver))
if transceiver.currentDirection in ["recvonly", "sendrecv"]:
await transceiver.receiver.receive(
self.__remoteRtp(transceiver)
)
if self.__sctp:
dtlsTransport = self.__sctp.transport
iceTransport = dtlsTransport.transport
if (
iceTransport.iceGatherer.getLocalCandidates()
and self.__sctp in self.__remoteIce
):
await iceTransport.start(self.__remoteIce[self.__sctp])
if dtlsTransport.state == "new":
await dtlsTransport.start(self.__remoteDtls[self.__sctp])
if dtlsTransport.state == "connected":
await self.__sctp.start(
self.__sctpRemoteCaps, self.__sctpRemotePort
)
async def __gather(self):
coros = map(lambda t: t.iceGatherer.gather(), self.__iceTransports)
await asyncio.gather(*coros)
def __assertNotClosed(self):
if self.__isClosed:
raise InvalidStateError("RTCPeerConnection is closed")
def __assertTrackHasNoSender(self, track):
for sender in self.getSenders():
if sender.track == track:
raise InvalidAccessError("Track already has a sender")
def __createDtlsTransport(self):
# create ICE transport
iceGatherer = RTCIceGatherer(iceServers=self.__configuration.iceServers)
iceGatherer.on("statechange", self.__updateIceGatheringState)
iceTransport = RTCIceTransport(iceGatherer)
iceTransport.on("statechange", self.__updateIceConnectionState)
self.__iceTransports.add(iceTransport)
# update states
self.__updateIceGatheringState()
self.__updateIceConnectionState()
return RTCDtlsTransport(iceTransport, self.__certificates)
def __createSctpTransport(self):
self.__sctp = RTCSctpTransport(self.__createDtlsTransport())
self.__sctp._bundled = False
self.__sctp.mid = None
@self.__sctp.on("datachannel")
def on_datachannel(channel):
self.emit("datachannel", channel)
def __createTransceiver(self, direction, kind, sender_track=None):
dtlsTransport = self.__createDtlsTransport()
transceiver = RTCRtpTransceiver(
direction=direction,
kind=kind,
sender=RTCRtpSender(sender_track or kind, dtlsTransport),
receiver=RTCRtpReceiver(kind, dtlsTransport),
)
transceiver.receiver._set_rtcp_ssrc(transceiver.sender._ssrc)
transceiver.sender._stream_id = self.__stream_id
transceiver._bundled = False
transceiver._transport = dtlsTransport
self.__transceivers.append(transceiver)
return transceiver
def __getTransceiverByMid(self, mid):
return next(filter(lambda x: x.mid == mid, self.__transceivers), None)
def __getTransceiverByMLineIndex(self, index):
return next(
filter(lambda x: x._get_mline_index() == index, self.__transceivers), None
)
def __localDescription(self):
return self.__pendingLocalDescription or self.__currentLocalDescription
def __localRtp(self, transceiver):
rtp = RTCRtpParameters(
codecs=transceiver._codecs,
headerExtensions=transceiver._headerExtensions,
muxId=transceiver.mid,
)
rtp.rtcp.cname = self.__cname
rtp.rtcp.ssrc = transceiver.sender._ssrc
rtp.rtcp.mux = True
return rtp
def __remoteDescription(self):
return self.__pendingRemoteDescription or self.__currentRemoteDescription
def __remoteRtp(self, transceiver):
media = self.__remoteDescription().media[transceiver._get_mline_index()]
receiveParameters = RTCRtpReceiveParameters(
codecs=transceiver._codecs,
headerExtensions=transceiver._headerExtensions,
muxId=media.rtp.muxId,
rtcp=media.rtp.rtcp,
)
if len(media.ssrc):
encodings = OrderedDict()
for codec in transceiver._codecs:
if is_rtx(codec):
if codec.parameters["apt"] in encodings and len(media.ssrc) == 2:
encodings[codec.parameters["apt"]].rtx = RTCRtpRtxParameters(
ssrc=media.ssrc[1].ssrc
)
continue
encodings[codec.payloadType] = RTCRtpDecodingParameters(
ssrc=media.ssrc[0].ssrc, payloadType=codec.payloadType
)
receiveParameters.encodings = list(encodings.values())
return receiveParameters
def __setSignalingState(self, state):
self.__signalingState = state
self.emit("signalingstatechange")
def __updateIceConnectionState(self):
# compute new state
states = set(map(lambda x: x.state, self.__iceTransports))
if self.__isClosed:
state = "closed"
elif "failed" in states:
state = "failed"
elif states == set(["completed"]):
state = "completed"
elif "checking" in states:
state = "checking"
else:
state = "new"
# update state
if state != self.__iceConnectionState:
self.__iceConnectionState = state
self.emit("iceconnectionstatechange")
def __updateIceGatheringState(self):
# compute new state
states = set(map(lambda x: x.iceGatherer.state, self.__iceTransports))
if states == set(["completed"]):
state = "complete"
elif "gathering" in states:
state = "gathering"
else:
state = "new"
# update state
if state != self.__iceGatheringState:
self.__iceGatheringState = state
self.emit("icegatheringstatechange")
def __validate_description(self, description, is_local):
# check description is compatible with signaling state
if is_local:
if description.type == "offer":
if self.signalingState not in ["stable", "have-local-offer"]:
raise InvalidStateError(
'Cannot handle offer in signaling state "%s"'
% self.signalingState
)
elif description.type == "answer":
if self.signalingState not in [
"have-remote-offer",
"have-local-pranswer",
]:
raise InvalidStateError(
'Cannot handle answer in signaling state "%s"'
% self.signalingState
)
else:
if description.type == "offer":
if self.signalingState not in ["stable", "have-remote-offer"]:
raise InvalidStateError(
'Cannot handle offer in signaling state "%s"'
% self.signalingState
)
elif description.type == "answer":
if self.signalingState not in [
"have-local-offer",
"have-remote-pranswer",
]:
raise InvalidStateError(
'Cannot handle answer in signaling state "%s"'
% self.signalingState
)
for media in description.media:
# check ICE credentials were provided
if not media.ice.usernameFragment or not media.ice.password:
raise ValueError("ICE username fragment or password is missing")
# check RTCP mux is used
if media.kind in ["audio", "video"] and not media.rtcp_mux:
raise ValueError("RTCP mux is not enabled")
# check the number of media section matches
if description.type in ["answer", "pranswer"]:
offer = (
self.__remoteDescription() if is_local else self.__localDescription()
)
offer_media = [(media.kind, media.rtp.muxId) for media in offer.media]
answer_media = [
(media.kind, media.rtp.muxId) for media in description.media
]
if answer_media != offer_media:
raise ValueError("Media sections in answer do not match offer")
| [
"[email protected]"
]
| |
a2b0d9876b409aa030d60fd036b25a4a456322eb | 6bfda75657070e177fa620a43c917096cbd3c550 | /kubernetes/client/models/v1_quobyte_volume_source.py | c6a908ad799fb3900dd6206276075c0813fbba3b | [
"Apache-2.0"
]
| permissive | don41382/client-python | 8e7e747a62f9f4fc0402eea1a877eab1bb80ab36 | e69d4fe204b98f7d7ee3ada3996b4f5fbceae5fe | refs/heads/master | 2021-01-19T23:15:50.172933 | 2017-04-18T18:00:48 | 2017-04-18T18:00:48 | 88,943,866 | 0 | 0 | null | 2017-04-21T05:19:52 | 2017-04-21T05:19:52 | null | UTF-8 | Python | false | false | 6,504 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1QuobyteVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, group=None, read_only=None, registry=None, user=None, volume=None):
"""
V1QuobyteVolumeSource - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'group': 'str',
'read_only': 'bool',
'registry': 'str',
'user': 'str',
'volume': 'str'
}
self.attribute_map = {
'group': 'group',
'read_only': 'readOnly',
'registry': 'registry',
'user': 'user',
'volume': 'volume'
}
self._group = group
self._read_only = read_only
self._registry = registry
self._user = user
self._volume = volume
@property
def group(self):
"""
Gets the group of this V1QuobyteVolumeSource.
Group to map volume access to Default is no group
:return: The group of this V1QuobyteVolumeSource.
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""
Sets the group of this V1QuobyteVolumeSource.
Group to map volume access to Default is no group
:param group: The group of this V1QuobyteVolumeSource.
:type: str
"""
self._group = group
@property
def read_only(self):
"""
Gets the read_only of this V1QuobyteVolumeSource.
ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
:return: The read_only of this V1QuobyteVolumeSource.
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""
Sets the read_only of this V1QuobyteVolumeSource.
ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
:param read_only: The read_only of this V1QuobyteVolumeSource.
:type: bool
"""
self._read_only = read_only
@property
def registry(self):
"""
Gets the registry of this V1QuobyteVolumeSource.
Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes
:return: The registry of this V1QuobyteVolumeSource.
:rtype: str
"""
return self._registry
@registry.setter
def registry(self, registry):
"""
Sets the registry of this V1QuobyteVolumeSource.
Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes
:param registry: The registry of this V1QuobyteVolumeSource.
:type: str
"""
if registry is None:
raise ValueError("Invalid value for `registry`, must not be `None`")
self._registry = registry
@property
def user(self):
"""
Gets the user of this V1QuobyteVolumeSource.
User to map volume access to Defaults to serivceaccount user
:return: The user of this V1QuobyteVolumeSource.
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""
Sets the user of this V1QuobyteVolumeSource.
User to map volume access to Defaults to serivceaccount user
:param user: The user of this V1QuobyteVolumeSource.
:type: str
"""
self._user = user
@property
def volume(self):
"""
Gets the volume of this V1QuobyteVolumeSource.
Volume is a string that references an already created Quobyte volume by name.
:return: The volume of this V1QuobyteVolumeSource.
:rtype: str
"""
return self._volume
@volume.setter
def volume(self, volume):
"""
Sets the volume of this V1QuobyteVolumeSource.
Volume is a string that references an already created Quobyte volume by name.
:param volume: The volume of this V1QuobyteVolumeSource.
:type: str
"""
if volume is None:
raise ValueError("Invalid value for `volume`, must not be `None`")
self._volume = volume
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
]
| |
b2ab8d47998ea4e78f6b52de16d4b8b57ba4020a | cd052f960846ea33e22abdded3106fb492f16c31 | /爬虫项目/code11/Tencent/Tencent/middlewares.py | f313fa007a063eec3e1f4f43302226f7dbe1aa01 | []
| no_license | byst4nder/his_spider | 2d96457b70894c36506e8061d8a3201ac337a5d0 | a51e31acff41292e568ac22b0e213e6cb48218fa | refs/heads/master | 2020-07-21T12:06:28.952083 | 2019-09-06T14:25:58 | 2019-09-06T14:25:58 | 206,857,595 | 1 | 0 | null | 2019-09-06T19:04:02 | 2019-09-06T19:04:02 | null | UTF-8 | Python | false | false | 1,284 | py | #coding:utf-8
import random
import requests
from fake_useragent import UserAgent
from settings import USER_AGENT_LIST
class RandomUserAgentMiddleware(object):
def __init__(self):
self.ua_obj = UserAgent()
def process_request(self, request, spider):
#user_agent = random.choice(USER_AGENT_LIST)
user_agent = self.ua_obj.random
request.headers["User-Agent"] = user_agent
print('---' * 10)
print(request.headers)
        # no explicit return is needed inside a downloader middleware
# return request
class RandomProxyMiddleware(object):
def __init__(self):
self.proxy_url = "http://kps.kdlapi.com/api/getkps/?orderid=914194268627142&num=1&pt=1&sep=1"
        # fetch the proxy provided by the proxy service
self.proxy_list = [requests.get(self.proxy_url).content]
self.count = 0
def process_request(self, request, spider):
if self.count < 20:
proxy = random.choice(self.proxy_list)
#http://47.99.65.91:16818
# http://maozhaojun:[email protected]:16818
request.meta['proxy'] = "http://maozhaojun:ntkn0npx@" + proxy
self.count += 1
else:
self.proxy_list = [requests.get(self.proxy_url).content]
self.count = 0
| [
"[email protected]"
]
| |
d795d34961b9c42afe0703c20a4e6eeb5855f39a | 21b39d50e4df56ea01453001845d1580729af1df | /jdcloud_sdk/services/cdn/apis/SetDomainConfigRequest.py | 1e000f49f2dff6150f0a5cf3e6fc819eb5b40be3 | [
"Apache-2.0"
]
| permissive | Tanc009/jdcloud-sdk-python | ef46eac7731aa8a1839b1fc1efd93249b7a977f0 | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | refs/heads/master | 2021-08-09T14:49:16.177709 | 2021-06-25T02:38:41 | 2021-06-25T02:38:41 | 141,714,695 | 0 | 0 | Apache-2.0 | 2018-07-20T13:21:17 | 2018-07-20T13:21:16 | null | UTF-8 | Python | false | false | 2,404 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class SetDomainConfigRequest(JDCloudRequest):
"""
    Update domain configuration
"""
def __init__(self, parameters, header=None, version="v1"):
super(SetDomainConfigRequest, self).__init__(
'/domain/{domain}/config', 'POST', header, version)
self.parameters = parameters
class SetDomainConfigParameters(object):
def __init__(self, domain, ):
"""
        :param domain: user domain name
"""
self.domain = domain
self.httpType = None
self.backSourceType = None
self.jumpType = None
self.jcdnTimeAnti = None
self.hdrCtrl = None
self.toutiaoHeader = None
def setHttpType(self, httpType):
"""
        :param httpType: (Optional) HTTP type; must be either http or https
"""
self.httpType = httpType
def setBackSourceType(self, backSourceType):
"""
        :param backSourceType: (Optional) back-to-origin type
"""
self.backSourceType = backSourceType
def setJumpType(self, jumpType):
"""
        :param jumpType: (Optional) one of three types: default, http, https
"""
self.jumpType = jumpType
def setJcdnTimeAnti(self, jcdnTimeAnti):
"""
        :param jcdnTimeAnti: (Optional) DASH authentication related configuration
"""
self.jcdnTimeAnti = jcdnTimeAnti
def setHdrCtrl(self, hdrCtrl):
"""
        :param hdrCtrl: (Optional) back-to-origin authentication related configuration
"""
self.hdrCtrl = hdrCtrl
def setToutiaoHeader(self, toutiaoHeader):
"""
        :param toutiaoHeader: (Optional) Toutiao header configuration
"""
self.toutiaoHeader = toutiaoHeader
| [
"[email protected]"
]
| |
e962b54ec262cb0e8a2b1e534a1193f362ac6c0e | 6e8d58340f2be5f00d55e2629052c0bbc9dcf390 | /lib/galaxy/datatypes/converters/fastqsolexa_to_fasta_converter.py | 1b68b3f6a2a340f24a5357a700a8e9995715fcc1 | [
"CC-BY-2.5",
"MIT"
]
| permissive | JCVI-Cloud/galaxy-tools-prok | e57389750d33ac766e1658838cdb0aaf9a59c106 | 3c44ecaf4b2e1f2d7269eabef19cbd2e88b3a99c | refs/heads/master | 2021-05-02T06:23:05.414371 | 2014-03-21T18:12:43 | 2014-03-21T18:12:43 | 6,092,693 | 0 | 2 | NOASSERTION | 2020-07-25T20:38:17 | 2012-10-05T15:57:38 | Python | UTF-8 | Python | false | false | 1,781 | py | #!/usr/bin/env python
"""
convert fastqsolexa file to separated sequence and quality files.
assume each sequence and quality score are contained in one line
the order should be:
1st line: @title_of_seq
2nd line: nucleotides
3rd line: +title_of_qualityscore (might be skipped)
4th line: quality scores
(in three forms: a. digits, b. ASCII codes, the first char as the coding base, c. ASCII codes without the first char.)
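Example record (illustrative values only, following the four-line layout above):
    @read_1_example
    GATTTGGGG
    +read_1_example
    hhhhhhhhh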
Usage:
%python fastqsolexa_to_fasta_converter.py <your_fastqsolexa_filename> <output_seq_filename> <output_score_filename>
"""
import sys, os
from math import *
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err( msg ):
sys.stderr.write( "%s" % msg )
sys.exit()
def __main__():
infile_name = sys.argv[1]
outfile = open( sys.argv[2], 'w' )
fastq_block_lines = 0
seq_title_startswith = ''
for i, line in enumerate( file( infile_name ) ):
line = line.rstrip() # eliminate trailing space and new line characters
if not line or line.startswith( '#' ):
continue
fastq_block_lines = ( fastq_block_lines + 1 ) % 4
line_startswith = line[0:1]
if fastq_block_lines == 1:
# line 1 is sequence title
if not seq_title_startswith:
seq_title_startswith = line_startswith
if seq_title_startswith != line_startswith:
stop_err( 'Invalid fastqsolexa format at line %d: %s.' %( i + 1, line ) )
read_title = line[ 1: ]
outfile.write( '>%s\n' % line[1:] )
elif fastq_block_lines == 2:
# line 2 is nucleotides
read_length = len( line )
outfile.write( '%s\n' % line )
else:
pass
outfile.close()
if __name__ == "__main__": __main__() | [
"[email protected]"
]
| |
525eb46142e733dea7a957256215fb27fe14dbe9 | 786de89be635eb21295070a6a3452f3a7fe6712c | /ParCorAna/tags/V00-00-08/src/unitTestsParCorAna.py | 2f089c31ef0f1ccbdfde74ff8e947a820923c7ba | []
| no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,144 | py | #--------------------------------------------------------------------------
# Description:
# Test script for ParCorAna
#
#------------------------------------------------------------------------
#--------------------------------
# Imports of standard modules --
#--------------------------------
import sys
import os
import logging
#import stat
import tempfile
import unittest
from cStringIO import StringIO
#import subprocess as sb
#import collections
#import math
import numpy as np
import h5py
import glob
import shutil
#-----------------------------
# Imports for other modules --
#-----------------------------
import psana
from AppUtils.AppDataPath import AppDataPath
import psana_test.psanaTestLib as ptl
#import h5py
#import psana_test.psanaTestLib as ptl
import ParCorAna as corAna
### helper function
def runCmd(cmd, verbose=True):
o,e,retcode = ptl.cmdTimeOutWithReturnCode(cmd)
if verbose: print "--- ran cmd: %s" % cmd
if verbose: print "output=%s\n\nerror=%s" % (o,e)
if verbose: print "return code=%r" % retcode
return retcode
def removeAllInProgressFromParentDir(fname):
basedir, basename = os.path.split(fname)
assert len(basedir)>0 and os.path.exists(basedir)
inProgressFiles = glob.glob(os.path.join(basedir, "*.inprogress"))
for inProgress in inProgressFiles:
os.unlink(inProgress)
def unindent(x):
def numSpacesStart(ln):
n=0
while len(ln)>0 and ln[0]==' ':
ln = ln[1:]
n+= 1
return n
lns = x.split('\n')
allLeadingSpaces = [numSpacesStart(ln) for ln in lns if len(ln.strip())>0]
minLeadingSpaces = min(allLeadingSpaces)
return '\n'.join([ln[minLeadingSpaces:] for ln in lns])
class FormatFileName( unittest.TestCase ) :
def setUp(self) :
"""
Method called to prepare the test fixture. This is called immediately
before calling the test method; any exception raised by this method
will be considered an error rather than a test failure.
"""
self.longMessage = True
destDirBase = AppDataPath(os.path.join("ParCorAna","testingDir")).path()
self.tempDestDir = tempfile.mkdtemp(dir=destDirBase)
def tearDown(self) :
"""
Method called immediately after the test method has been called and
the result recorded. This is called even if the test method raised
an exception, so the implementation in subclasses may need to be
particularly careful about checking internal state. Any exception raised
by this method will be considered an error rather than a test failure.
This method will only be called if the setUp() succeeds, regardless
of the outcome of the test method.
"""
shutil.rmtree(self.tempDestDir, ignore_errors=True)
def test_formatFileName(self):
fname = os.path.join(self.tempDestDir, "file.h5")
fname_w_T = os.path.join(self.tempDestDir, "file_%T.h5")
fname_w_C = os.path.join(self.tempDestDir, "file_%C.h5")
fname_other = os.path.join(self.tempDestDir, "file_jnk.h5")
self.assertEqual(corAna.formatFileName(fname),fname)
tmfname = corAna.formatFileName(fname_w_T)
os.system('touch %s' % tmfname)
self.assertNotEqual(tmfname,fname)
# %C 2015 05 05 16 19 59
self.assertEqual(len(tmfname),len(fname_w_T)-2 +4 +2 +2 +2 +2 +2, msg="tmfname=%s" % tmfname)
os.system('touch %s' % fname)
os.system('touch %s' % tmfname)
c0 = corAna.formatFileName(fname_w_C)
self.assertNotEqual(c0,fname)
self.assertEqual(c0, fname_w_C.replace('%C','000'))
os.system('touch %s' % c0)
c1 = corAna.formatFileName(fname_w_C)
self.assertEqual(c1, fname_w_C.replace('%C','001'))
os.system('touch %s' % c1)
os.system('touch %s' % fname_other)
c2 = corAna.formatFileName(fname_w_C)
self.assertEqual(c2, fname_w_C.replace('%C','002'))
class ParCorAna( unittest.TestCase ) :
def setUp(self) :
"""
Method called to prepare the test fixture. This is called immediately
before calling the test method; any exception raised by this method
will be considered an error rather than a test failure.
"""
self.longMessage = True
def tearDown(self) :
"""
Method called immediately after the test method has been called and
the result recorded. This is called even if the test method raised
an exception, so the implementation in subclasses may need to be
particularly careful about checking internal state. Any exception raised
by this method will be considered an error rather than a test failure.
This method will only be called if the setUp() succeeds, regardless
of the outcome of the test method.
"""
pass
def test_parseDataSetString(self):
'''test parseDataSetString function
'''
dsOpts = corAna.parseDataSetString('exp=amo123:run=12')
self.assertEqual(dsOpts['exp'],'amo123')
self.assertEqual(dsOpts['run'],[12])
self.assertEqual(dsOpts['h5'],False)
self.assertEqual(dsOpts['xtc'],True)
self.assertEqual(dsOpts['live'],False)
self.assertEqual(dsOpts['shmem'],False)
def test_noConfig(self):
system_params = {}
user_params = {}
test_alt = False
self.assertRaises(AssertionError, corAna.CommSystemFramework, system_params, user_params, test_alt)
def test_logger(self):
msg1 = 'hi there'
msg2 = 'what?'
try:
stdout = sys.stdout
stderr = sys.stderr
sys.stdout = StringIO()
sys.stderr = StringIO()
self.assertRaises(AssertionError, corAna.makeLogger, False, True, True, True, 0, 'INFO', False)
l = corAna.makeLogger( False, True, False, False, 0, 'INFO', False)
l2 = corAna.makeLogger( False, True, False, False, 0, 'INFO', False) # make sure getting another ref doesn't double handlers
l.info(msg1)
l.warning(msg2)
except Exception,e:
sys.stdout = stdout
sys.stderr = stderr
raise e
stderrLns = [ln for ln in sys.stderr.getvalue().split('\n') if len(ln.strip())>0]
stdoutLns = [ln for ln in sys.stdout.getvalue().split('\n') if len(ln.strip())>0]
sys.stderr.close()
sys.stdout.close()
sys.stdout = stdout
sys.stderr = stderr
self.assertEqual(len(stderrLns),2)
self.assertEqual(len(stdoutLns),0)
self.assertTrue(stderrLns[0].find('INFO')>0 and stderrLns[0].find(msg1)>0, msg='log ln=%s does not have INFO nor %s in it' % (stderrLns[0], msg1))
self.assertTrue(stderrLns[1].find('WARNING')>0 and stderrLns[1].find(msg2)>0, msg='log ln=%s does not have WARNING nor %s in it' % (stderrLns[1], msg2))
class Cspad2x2( unittest.TestCase ) :
'''Test on small cspad2x2
'''
def setUp(self) :
"""
Method called to prepare the test fixture. This is called immediately
before calling the test method; any exception raised by this method
will be considered an error rather than a test failure.
"""
pass
dataDir = os.path.join(ptl.getMultiFileDataDir(), 'test_013_xcsi0314')
experiment = 'xcsi0314'
run = 178
maskColorDir = os.path.join(dataDir, 'maskColorDir')
maskFileBaseName = '%s-r%d_XcsEndstation_0_Cspad2x2_0_mask_ndarrCoords.npy' % (experiment, run)
testMaskFileBaseName = '%s-r%d_XcsEndstation_0_Cspad2x2_0_testmask_ndarrCoords.npy' % (experiment, run)
colorFileBaseName = '%s-r%d_XcsEndstation_0_Cspad2x2_0_color_ndarrCoords.npy' % (experiment, run)
maskFile = os.path.join(maskColorDir, maskFileBaseName)
testMaskFile = os.path.join(maskColorDir, testMaskFileBaseName)
colorFile = os.path.join(maskColorDir, colorFileBaseName)
assert os.path.exists(maskFile), "mask file %s doesn't exist" % maskFile
assert os.path.exists(testMaskFile), "test maskfile %s doesn't exist" % testMaskFile
        assert os.path.exists(colorFile), "color file %s doesn't exist" % colorFile
numServers = 1
# make a random directory for the testing that we will remove when done
destDirBase = AppDataPath(os.path.join("ParCorAna","testingDir")).path()
assert len(destDirBase)>0, "did not find testingDir base dir in the ParCorAna data dir"
# tempDestDir = tempfile.mkdtemp(dir=destDirBase)
tempDestDir = os.path.join(destDirBase, "mytest") # DVD REMOVE
if not os.path.exists(tempDestDir): os.mkdir(tempDestDir)
h5outputBaseName = 'g2calc_cspad2x2_%%s_%s-r%4.4d.h5' % (experiment, run) # has %%s for for testName
testH5outputBaseName = 'test_' + h5outputBaseName
h5outputFile = os.path.join(tempDestDir, h5outputBaseName)
testH5outputFile = os.path.join(tempDestDir, testH5outputBaseName)
removeAllInProgressFromParentDir(h5outputFile)
userClass = '--TESTS-MUST-FILL-THIS-IN--'
testName = '--TESTS-MUST-FILL-THIS-IN--'
numTimes = 100 # test data only has 60 events
delays = [1, 2, 3, 5, 7, 10, 15, 23, 34, 50]
self.formatDict = locals().copy()
self.numEvents = 60 # There are 60 events in the test data.
# these 60 events go from fiducials 33132 -> 33312, they go by 3 *except* that they skip
# fiducial 33300. So as 120hz counter times, these go from 1 to 61 and they skip 57.
# the number of delay counts we'll get will be 60-delay for delays > 4
# and 59-delay for delays <=4.
def expectedDelay(delay):
if delay > 4: return 60 - delay
return 59-delay
self.expectedCounts = [expectedDelay(delay) for delay in delays]
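        # Sanity check of the formula above (comment added for clarity): with counter times
        # 1..61 and 57 missing, a delay d loses the pair starting at t=57-d and, when d <= 4,
        # also the pair starting at t=57, giving 59-d for d <= 4 and 60-d otherwise,
        # e.g. delay 1 -> 58 and delay 5 -> 55.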
# Here are commands to see this:
# eventCountCmd = 'psana -m PrintEventId %s/e*-r%4.4d*.xtc | grep fiducials | grep -v "fiducials=131071" | wc' % (self.dataDir, self.run)
# evtCountOut, evtCountErr = ptl.cmdTimeOut(eventCountCmd)
# numEventsFromCmd = int(evtCountOut.split()[0])
# self.assertEqual(numEvents, numEventsFromCmd, "ran cmd=%s expected to get %d events, but got %d" % (eventCountCmd, numEvents, numEventsFromCmd))
self.tempDestDir = tempDestDir
self.dataDir = dataDir
self.run = run
self.configFileContent='''
import psana
import numpy as np
import ParCorAna as corAna
system_params = {{}}
system_params['dataset'] = 'exp={experiment}:run={run}:dir={dataDir}'
system_params['src'] = 'DetInfo(XcsEndstation.0:Cspad2x2.0)'
system_params['psanaType'] = psana.CsPad2x2.ElementV1
system_params['ndarrayProducerOutKey'] = 'ndarray'
system_params['ndarrayCalibOutKey'] = 'calibrated'
system_params['psanaOptions'], system_params['outputArrayType'] = \\
corAna.makePsanaOptions(srcString=system_params['src'],
psanaType=system_params['psanaType'],
ndarrayOutKey=system_params['ndarrayProducerOutKey'],
ndarrayCalibOutKey=system_params['ndarrayCalibOutKey'])
system_params['workerStoreDtype']=np.float32
system_params['maskNdarrayCoords'] = '{maskFile}'
system_params['testMaskNdarrayCoords'] = '{testMaskFile}'
system_params['numServers'] = {numServers}
system_params['serverHosts'] = None # None means system selects which hosts to use (default).
system_params['times'] = {numTimes}
system_params['update'] = 0
system_params['delays'] = {delays}
testName = '{testName}'
system_params['h5output'] = '{h5outputFile}' % testName
system_params['testH5output'] = '{testH5outputFile}' % testName
system_params['overwrite'] = True
system_params['verbosity'] = 'INFO'
system_params['numEvents'] = 0
system_params['testNumEvents'] = 0
import ParCorAna.UserG2 as UserG2
system_params['userClass'] = {userClass}
user_params = {{}}
user_params['colorNdarrayCoords'] = '{colorFile}'
user_params['saturatedValue'] = (1<<15)
user_params['LLD'] = 1E-9
user_params['notzero'] = 1E-5
user_params['psmon_plot'] = False
'''
def tearDown(self) :
"""
Method called immediately after the test method has been called and
the result recorded. This is called even if the test method raised
an exception, so the implementation in subclasses may need to be
particularly careful about checking internal state. Any exception raised
by this method will be considered an error rather than a test failure.
This method will only be called if the setUp() succeeds, regardless
of the outcome of the test method.
"""
pass
# shutil.rmtree(self.tempDestDir, ignore_errors=True) DVD REMOVE
def test_FilesSame(self):
'''
check that the input files haven't changed
'''
md5sums={'maskColorDir/xcsi0314-r178_XcsEndstation_0_Cspad2x2_0_color_ndarrCoords.npy': 'dad6ebe25b364eeea4114c036b54ea4c',
'maskColorDir/xcsi0314-r178_XcsEndstation_0_Cspad2x2_0_mask_ndarrCoords.npy': '9b8ade01f93fc087228c15cad9944856',
'maskColorDir/xcsi0314-r178_XcsEndstation_0_Cspad2x2_0_testmask_ndarrCoords.npy': '282715e77fb5e4247a6b0851f3b244ea',
'e524-r0178-s00-c00.xtc': 'b73a43ee4393c8c793d430f951cad021',
'e524-r0178-s01-c00.xtc': 'eee2248370bef1a94202d5d6afd89799',
'e524-r0178-s02-c00.xtc': 'd340d899c5ab36f34b75df419af3b711',
'e524-r0178-s03-c00.xtc': '111d1ab55c6bbb685bea7d5501587e1d',
'e524-r0178-s04-c00.xtc': '18fcbc6eec20d2a94f31750f49dc1bda',
'e524-r0178-s05-c00.xtc': '9d87909f0c613ca6433fc94d0985521d'
}
for fname, prev_md5 in md5sums.iteritems():
fullFname = os.path.join(self.dataDir,fname)
assert os.path.exists(fullFname)
cur_md5 = ptl.get_md5sum(fullFname)
self.assertEqual(cur_md5, prev_md5, msg="md5 has changed for %s. old=%s new=%s" % \
(fullFname, prev_md5, cur_md5))
def writeConfigFile(self, configname):
configFileName = os.path.join(self.tempDestDir, configname)
configFile = file(configFileName, 'w')
configFile.write(unindent(self.configFileContent.format(**self.formatDict)))
configFile.close()
return configFileName
def checkDelays(self, h5fileName, delays, expectedCounts):
h5file = h5py.File(h5fileName,'r')
systemDelays = list(h5file['system/system_params/delays'][:])
userDelays = list(h5file['user/G2_results_at_000060/delays'][:])
self.assertListEqual(delays, systemDelays, msg='delays written to config != system delays')
self.assertListEqual(systemDelays, userDelays, msg="in h5 output file, system and user section do not have same delays")
counts = list(h5file['user/G2_results_at_000060/delay_counts'][:])
self.assertEqual(len(counts), len(expectedCounts))
self.assertListEqual(counts, expectedCounts, msg="delay counts wrong.\nAns=%r\nRes=%r\nDly=%r" % \
(expectedCounts, counts, list(delays)))
def test_G2atEnd(self):
self.formatDict['userClass']='UserG2.G2atEnd'
testName = 'atEnd'
self.formatDict['testName'] = testName
configFileName = self.writeConfigFile('config_G2atEnd.py')
cmd = 'mpiexec -n 4 parCorAnaDriver --test_main -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
# check delays
h5outputFile = self.formatDict['h5outputFile'] % testName
self.checkDelays(h5outputFile , self.formatDict['delays'], self.expectedCounts)
cmd = 'parCorAnaDriver --test_alt -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
cmd = 'parCorAnaDriver --cmp -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="error running %s - files must differ" % cmd)
def test_G2IncrementalAccumulator(self):
self.formatDict['userClass']='UserG2.G2IncrementalAccumulator'
testName = 'incrAccum'
self.formatDict['testName'] = testName
configFileName = self.writeConfigFile('config_G2IncrementalAccumulator.py')
cmd = 'mpiexec -n 4 parCorAnaDriver --test_main -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
# check delays
h5outputFile = self.formatDict['h5outputFile'] % testName
self.checkDelays(h5outputFile, self.formatDict['delays'], self.expectedCounts)
cmd = 'parCorAnaDriver --test_alt -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
cmd = 'parCorAnaDriver --cmp -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="error running %s - files must differ" % cmd)
def test_G2Window(self):
self.formatDict['userClass']='UserG2.G2IncrementalWindowed'
testName = 'windowa'
self.formatDict['testName'] = testName
self.formatDict['numTimes'] = 20 # 60 events, so we will get a smaller window
delays = self.formatDict['delays']
self.assertListEqual(delays,[1,2,3,5,7,10,15,23,34,50])
# --- the twenty fiducials we will have will effectively look like
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 -- 18 19 20 21
self.expectedCounts = [ 18, 17, 16, 15, 13, 10, 5, 0, 0, 0]
configFileName = self.writeConfigFile('config_G2windoweda.py')
cmd = 'mpiexec -n 4 parCorAnaDriver --test_main -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
# check delays
h5outputFile = self.formatDict['h5outputFile'] % testName
self.checkDelays(h5outputFile, self.formatDict['delays'], self.expectedCounts)
cmd = 'parCorAnaDriver --test_alt -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
cmd = 'parCorAnaDriver --cmp -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="error running %s - files must differ" % cmd)
# we expect windowed incremental to produce the same result as G2 at end with a small numTimes
self.formatDict['userClass']='UserG2.G2atEnd'
self.formatDict['testName'] = 'windowedb'
configFileName = self.writeConfigFile('config_G2windowedb.py')
cmd = 'mpiexec -n 4 parCorAnaDriver --test_main -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
h5A = h5outputFile
h5B = self.formatDict['h5outputFile'] % testName
cmd = 'cmpParCorAnaH5OutputPy %s %s' % (h5A, h5B)
print "running cmd=%s" % cmd
o,e,retcode = ptl.cmdTimeOutWithReturnCode(cmd)
print "stdout=%s\nstderr=%s" % (o,e)
self.assertEqual(0, retcode, msg="comparing windowed to atEnd with numTimes=%d failed" % self.formatDict['numTimes'])
if __name__ == "__main__":
unittest.main(argv=[sys.argv[0], '-v'])
| [
"[email protected]@b967ad99-d558-0410-b138-e0f6c56caec7"
]
| [email protected]@b967ad99-d558-0410-b138-e0f6c56caec7 |
701cceafbc17e8595614eabc2d26909564c55589 | 99249dad36df26a712ae8d900041d53acf3901ea | /settings/channel_archiver/NIH.SAMPLE_FROZEN_XRAY_settings.py | 0cf3746eba7d2d3f8917deddf604d4ffaa86f80d | [
"MIT"
]
| permissive | bopopescu/Lauecollect | f1f79c2cc5ff106df0dedbd6939ec92630d2b305 | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | refs/heads/master | 2022-11-29T00:40:28.384831 | 2019-06-05T01:21:36 | 2019-06-05T01:21:36 | 280,989,300 | 0 | 0 | MIT | 2020-07-20T02:03:22 | 2020-07-20T02:03:22 | null | UTF-8 | Python | false | false | 111 | py | SPOTS.filename = '//femto/C/All Projects/APS/Experiments/2019.05/Test/Archive/NIH.SAMPLE_FROZEN_XRAY.SPOTS.txt' | [
"[email protected]"
]
| |
d224d3604bd4bf178bcc2ccbd591c0f88336a58b | 77d808f47101202db6cec5a9eee6b38c55f73fde | /24. Regular Expressions/04.py | 62ae2b81b1544adb49f3011abd21606be8b3f9cb | []
| no_license | dimDamyanov/Py-Fundamentals | 2ce5591fbfebf8d95c832e3f7109b24e53dd721b | 5ccae5bfa456829d97e8773ee9f5eaa5f5051765 | refs/heads/main | 2023-01-29T22:21:07.788061 | 2020-12-13T08:11:04 | 2020-12-13T08:11:04 | 317,682,227 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | import re
data = input()
numbers = [n.group(0) for n in re.finditer(r'(^|(?<=\s))-*\d+(\.\d+)*($|(?=\s))', data)]
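# e.g. (illustrative) for the input "1 -2.5 3a 4" only the whitespace-delimited
# numbers match, so the script prints: 1 -2.5 4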
print(*numbers, sep=' ') | [
"[email protected]"
]
| |
058258ee3d0ec5cbba5e415fadbcea87d45b8a9d | a1c6fea0703d7d813a88aae91a7fbb17e06785ea | /third_admin/apps.py | daa061129e27bc5023c0d553c5287f0a2b872cb2 | []
| no_license | warm200/SpokesTribe | bea676b2868272ceab17176d7eb5d98ae7747543 | 8c3671214e317987645aeef4451e590bcb772f7e | refs/heads/master | 2022-01-11T18:12:40.847007 | 2019-02-08T13:08:38 | 2019-02-08T13:08:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from django.apps import AppConfig
class ThirdAdminConfig(AppConfig):
name = 'third_admin'
| [
"[email protected]"
]
| |
138ad6816981ced62f71bd3859116d1fa7ecfa16 | e8d34c096f9df7f22ff5ccee34cf9f6e6a0adab4 | /flask_login/test_gpios.py | 3de4b6984d75c2f38c64ff8539dbc50799074af9 | []
| no_license | MarianoDel/coralpreto_py | 50fed2bd4032d4e3adc29c06de4b096ee1b3833a | 06bbe3f814fdbf80ae58b1ba6a53d0e96f0ec566 | refs/heads/master | 2023-03-07T14:19:19.074639 | 2022-03-25T17:34:38 | 2022-03-25T17:34:38 | 238,445,438 | 0 | 0 | null | 2023-03-05T06:04:33 | 2020-02-05T12:30:05 | JavaScript | UTF-8 | Python | false | false | 1,416 | py | # -*- coding: utf-8 -*-
# use python3
import time
RUNNING_ON_RASP = 0
if RUNNING_ON_RASP:
from gpios import *
GpiosInit()
def TestBlue():
print ("start blinking blue led for 10 secs")
if RUNNING_ON_RASP:
LedBlueToggleContinous('start')
time.sleep(10)
print ("ending toggling")
if RUNNING_ON_RASP:
LedBlueToggleContinous('stop')
print ("test ended!")
def TestChannel ():
channel = ['09', '12', '14', '71', '72', '74', '77', '81']
for i in range(len(channel)):
print ("memory: " + str(i) + " test channel: " + channel[i])
if RUNNING_ON_RASP:
            Channel_to_Memory(channel[i])  # assumed to take one channel string at a time (the whole list was passed before)
time.sleep(5)
print ("test ended!")
def TestPtt():
print ("PTT on for 5 secs")
if RUNNING_ON_RASP:
PttOn()
time.sleep(5)
if RUNNING_ON_RASP:
PttOff()
print ("Ptt off")
print ("test ended!")
def TestEncendido():
print ("Encendido on for 5 secs")
if RUNNING_ON_RASP:
OnOff_On()
time.sleep(5)
if RUNNING_ON_RASP:
OnOff_Off()
print ("Encendido off")
print ("test ended!")
def InitialValues ():
    # guarded like the other helpers so the script also runs off the Raspberry Pi
    if RUNNING_ON_RASP:
        LedBlueOff()
        PttOff()
        OnOff_Off()
        Bit0Off()
        Bit1Off()
        Bit2Off()
##############
# Main Tests #
##############
InitialValues()
TestBlue()
TestChannel()
TestPtt()
TestEncendido()
if RUNNING_ON_RASP:
    GpiosCleanUp()
| [
"[email protected]"
]
| |
fe5008878edb08f5883649ab0765b19fdb4de0ce | 3b944f1714c458c5d6d0e84d4b1498f2b59c4ef7 | /581. Shortest Unsorted Continuous Subarray.py | 3fa7d45d3112e56a100aa8150f35c38a0d623fae | []
| no_license | shiannn/LeetCodePython | e4d66f108200d8329616b3e45b70c3f8fc4cd9ed | 6e4472d41904e60ff9d70b5f3979c5dcae98c838 | refs/heads/master | 2021-06-26T03:24:03.079077 | 2021-02-24T16:54:18 | 2021-02-24T16:54:18 | 213,206,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | class Solution:
def findUnsortedSubarray(self, nums) -> int:
end = -2
start = -1
max_ = -float('inf')
for idx, num in enumerate(nums):
max_ = max(max_, num)
if max_ != num:
end = idx
min_ = float('inf')
for idx, num in reversed(list(enumerate(nums))):
#print(idx, num)
min_ = min(min_, num)
if min_ != num:
start = idx
#print(start, end)
return end - start + 1
if __name__ == '__main__':
sol = Solution()
nums = [2,6,4,8,10,9,15]
ret = sol.findUnsortedSubarray(nums)
print(ret) | [
"[email protected]"
]
| |
4911d82b51dc9ec4b68a07e2dc8f0b5229a842e6 | 099f8740e61878c92c067e96d76ccb014cd342c3 | /robovat/simulation/__init__.py | 718ea92343dbbbccf8f49643d7c02676671f222b | [
"MIT"
]
| permissive | UT-Austin-RPL/robovat | c52d7f0b5b4244ad19fc7c15c876e005626bf182 | c333ce7f1d7b156bedf28c3b09793f5487b6690a | refs/heads/master | 2023-01-06T12:32:39.304293 | 2020-11-12T20:12:25 | 2020-11-12T20:12:25 | 290,521,446 | 7 | 2 | MIT | 2020-08-28T17:33:52 | 2020-08-26T14:37:23 | null | UTF-8 | Python | false | false | 381 | py | from robovat.simulation.base import Base
from robovat.simulation.body import Body
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.entity import Entity
from robovat.simulation.joint import Joint
from robovat.simulation.link import Link
from robovat.simulation.simulator import Simulator
| [
"[email protected]"
]
| |
a3dafb3d4576186964f7d3265b17eb05cf0d5f78 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/359/usersdata/282/109815/submittedfiles/lecker.py | ac07eef3ffae2806101fd83deb886992b12a8634 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | # -*- coding: utf-8 -*-
c=int(input('Enter the number of queries: '))
pedidos=[]
fabricados=[]
for i in range (0,c,1):
    pedidos.append(int(input('Enter the stick size: ')))
for i in range(0,c,1):
    # a stick size is fabricated only once, so count each requested size a single time
    if pedidos[i] not in fabricados:
        fabricados.append(pedidos[i])
print(len(fabricados))
| [
"[email protected]"
]
| |
1e5a9be74f78ccb91bf9ebd626783bf8123fcbaf | 8e2e28a191fa5ec5a6c070ec7e9ccad98c8b4a0b | /jiaocheng/02-python核心编程/05-getattribute属性.py | 0751dbd47c6556fed35b260ab822029c2dbcc613 | [
"Apache-2.0"
]
| permissive | kellanfan/python | 4cd61cbc062e2eee3a900fa7447ca5f0b8f1a999 | 912dc05a3bd0ded9544166a68da23ca0a97b84da | refs/heads/master | 2023-04-06T03:04:38.851928 | 2023-04-01T02:45:56 | 2023-04-01T02:45:56 | 65,542,280 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | class Itcast(object):
def __init__(self,subject1):
self.subject1 = subject1
self.subject2 = 'cpp'
    # interceptor that runs on every attribute access; used here to log
def __getattribute__(self,obj):
print("====1>%s"%obj)
if obj == 'subject1':
print('log subject1')
return 'redirect python'
        else: # comment out these 2 lines when testing and subject2 will no longer be found
temp = object.__getattribute__(self,obj)
print("====2>%s"%str(temp))
# return temp
def show(self):
print('this is Itcast')
s = Itcast("python")
print(s.subject1)
print(s.subject2)
s.show()
#1. first fetch whatever the show attribute refers to ... it should be a method
#2. then call it: method()
# i.e. in an object both attributes and methods are references; a method is just a variable pointing to a function
# import types
# p1.eat = types.MethodType(eat, p1)
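# Illustrative sketch of that MethodType pattern (eat/p1 are hypothetical names from
# another lesson, not defined in this file):
#   def eat(self):
#       print('eating')
#   p1 = Itcast('python')
#   p1.eat = types.MethodType(eat, p1)   # bind the plain function to this instance
#   p1.eat()                             # prints 'eating'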
| [
"[email protected]"
]
| |
5117dc2fd127111959aeb4c16a0827934522c3b0 | 9835b6949fe4c8018de57aee531dedf1509337cc | /September_2020/sep_11_Maximun_Product_Subarray.py | 3c6c6472a9a78f7529c7993b6863e42fdb1b0150 | []
| no_license | jcai0o0/My_Leetcode_Solutions | f6edea0693d252a99e6507a1724a89763113f8a0 | 3fc909c01c6a345f625c9ab9e0f1584ea5fa8ab4 | refs/heads/master | 2023-01-01T04:08:33.929184 | 2020-10-17T02:01:56 | 2020-10-17T02:01:56 | 289,094,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | class Solution:
def maxProduct(self, nums: List[int]) -> int:
if not nums:
return 0
N = len(nums)
f = [0] * N
g = [0] * N
f[0] = g[0] = res = nums[0]
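        # f[i]/g[i] hold the largest/smallest product of a subarray ending at i; the
        # smallest is kept because multiplying it by a negative nums[i] can become the new maximum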
for i in range(1, N):
f[i] = max(f[i-1] * nums[i], nums[i], g[i-1] * nums[i])
g[i] = min(f[i-1] * nums[i], nums[i], g[i-1] * nums[i])
res = max(res, f[i])
return res | [
"[email protected]"
]
| |
8a660027140e9e78abc8c1cef9034e5860eafbef | fb613b77989f1c7db1cb4e149adf0430f7a1404a | /ewrec_class.py | 737298189a632a50750821fe3793b71a903c1388 | [
"MIT"
]
| permissive | Jeffrey-Ede/AI-CV-Automation-Elect-Micr | ce64be88d8b76d88c2e29158fa2cd5cdf4d65a7c | b53072132046ff6f1e8861b96c4263abbee2b6eb | refs/heads/AI-CV-Automation-Electr-Micr | 2021-04-27T09:37:44.295030 | 2020-09-14T15:39:35 | 2020-09-14T15:39:35 | 122,518,865 | 3 | 0 | null | 2018-02-28T17:36:01 | 2018-02-22T18:34:05 | Python | UTF-8 | Python | false | false | 17,188 | py | import numpy as np
import glob
import cv2
import arrayfire as af
from skimage.measure import compare_ssim as ssim
from scipy.misc import imread
from scipy.optimize import minimize
class Utility(object):
def __init__(self):
pass
@staticmethod
def np_to_af(np_arr, dtype=af.Dtype.f32):
return af.Array(np_arr.ctypes.data, np_arr.shape, np_arr.dtype.char).as_type(dtype)
@staticmethod
def fft_shift(fft):
return af.shift(fft, fft.dims()[0]//2 + fft.dims()[0]%2, fft.dims()[1]//2 + fft.dims()[1]%2)
@staticmethod
def scale0to1(img):
"""Rescale image between 0 and 1"""
min = np.min(img)
max = np.max(img)
if min == max:
img.fill(0.5)
else:
img = (img-min) / (max-min)
return img.astype(np.float32)
    @staticmethod
    def disp_af(arr):
        cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
        cv2.imshow('CV_Window', Utility.scale0to1(arr.__array__()))
cv2.waitKey(0)
return
@staticmethod
    def af_phase(img):
        # 'phase' was undefined here; assume the intent is the phase angle of the
        # complex arrayfire image, taken through numpy
        phase = np.angle(img.__array__())
        f = lambda x: x if x < np.pi else np.pi-x
        vecf = np.vectorize(f)
        return vecf(phase)
@staticmethod
def disp_af_complex_amp(fft, log_of_fft=True):
amp = np.log(np.absolute(fft)) if log_of_fft else np.absolute(fft)
cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
        cv2.imshow('CV_Window', Utility.scale0to1(amp))
cv2.waitKey(0)
return
@staticmethod
def disp_af_complex_phase(fft):
cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
cv2.imshow('CV_Window', scale0to1(af_phase(fft)))
cv2.waitKey(0)
return
@staticmethod
def save_af_as_npy(arr, filename, save_loc=""):
if filename[-4:-1] != ".npy":
filename += ".npy"
if save_loc[-1] != "/":
save_loc += "/"
np.save(save_loc+filename+".npy", arr.__array__())
return
@staticmethod
def get_radial_freq_hist(img, mean=1.0): #Currently unused
abs_shifted_fft = np.abs(np.fft.fftshift(np.fft.fft2(img)))
rows = cols = int(abs_shifted_fft.shape[0])
mid_row = mid_col = int(np.ceil(abs_shifted_fft.shape[0]/2))
max_rad = int(np.ceil(np.sqrt((mid_row)**2 + (mid_col)**2)))+1
radial_profile = np.zeros(max_rad)
for col in range(cols):
for row in range(rows):
radius = np.sqrt((row-mid_row+1)**2 + (col-mid_col+1)**2)
idx = int(np.ceil(radius))
radial_profile[idx] += abs_shifted_fft[col][row]
return radial_profile
@staticmethod
def af_padded_fft2(img, pad_val=0., pad_periods=1):
side = img.dims()[0]
padded_img = af.constant(0., (1+pad_periods)*side, (1+pad_periods)*side)
padded_img[:side, :side] = img
return af.fft2(padded_img)
@staticmethod
def af_unpadded_ifft2(fft, pad_periods=1):
side = fft.dims()[0] // (1+pad_periods)
return af.ifft2(fft)[:side, :side]
###########################################################################################
class EWREC(Utility):
def __init__(self,
stack_dir,
wavelength,
rel_pos=None,
rel_pos_method="phase_corr",
series_type = "cubic",
series_alternating=True, #Focuses alternating about in-focus position
series_middle = None, #Middle focus index only needed if series alternates about centre. If not
#provided, the halfway index will be chosen
series_increasing=True,
defocuses=None,
defocus_search_range=[0., 10.], #nm**-1?
reconstruction_side=None,
defocus_sweep_num=10, #Number of defocuses to try initial sweep to find the defocus
defocus_search_criteria = ["gradient_plateau"],
preprocess_fn=None,
param_refinement = False, #Time-consuming refinement of relative positions and defocus values
nn_refinement=False, #TODO
report_progress=True,
pad_periods=1,
pad_val=0.): #Number of periods of signal to pad it by for fft
self.stack_dir = stack_dir
self.stack = get_stack_from_tifs(dir)
if preprocess_fn:
self.stack = preprocess_fn(self.stack)
self.stack_side = self.stack[0].shape[0]
rel_pos_fns = {"phase_corr": self.rel_pos_phase_corr}
rel_pos = rel_pos_fns["phase_corr"]()
self.display_iter_nums = report_progress
self.pad_periods = pad_periods
self.pad_value = pad_value
#Focal series meta
self.wavelength = wavelength
self.series_type = focal_series_type
self.series_alternating = series_alternating
self.series_mid = series_middle
self.series_increasing = series_increasing
self.reconstruction_side = reconstruction_side
self.rel_pos = rel_pos if rel_pos else self.rel_pos_estimate(rel_pos_method, as_cropping_centres=True)
self.cropped_stack = self.crop_stack(self.rel_pos)
if defocuses:
self.defocuses = defocuses
else:
self.initial_defocus_sweep_num = defocus_sweep_num
self.defocus_search_criteria = defocus_search_criteria
self.defocus_search_range = [1.e9*x for x in defocus_search_range]
self.defocuses = self.defocus_initial_estimate()
if param_refinement:
self.rel_pos, self.defocuses = self.refine_params()
self.exit_wave = self.reconstruct()
@staticmethod
def get_stack_from_tifs(dir):
if dir[-1] != "/":
dir += "/"
files = glob.glob(dir+"*.tif")
stack = [imread(file, mode='F') for file in files]
return stack
def crop_stack(self, centres, side=None, resize_crop_for_reconstruction=True):
"""Crop parts of images from a stack"""
if not side:
#Calculate largest possible crop side
            min_dist = float('inf')  # start large so the first centre initialises it
            for centre in centres:
                min_from_side = np.min([centre[0], centre[1], self.stack_side-centre[0], self.stack_side-centre[1]])
                if min_from_side < min_dist:
                    min_dist = min_from_side
side = int(2*min_dist)
side_pow2 = int(np.log2(side))**2
crops = []
for img in self.stack:
left = int(centres[0]-side)
right = int(centres[0]+side)
bottom = int(centres[1]-side)
top = int(centres[1]+side)
horiz_over = centres[0]-int(centres[0])
vert_over = centres[1]-int(centres[1])
prop_tl = horiz_over*vert_over
prop_tr = (1.-horiz_over)*vert_over
prop_bl = horiz_over*(1.-vert_over)
prop_br = (1.-horiz_over)*(1.-vert_over)
crop = np.zeros((side, side))
for row, x in zip(range(side), range(left, left+side)):
for col, y in zip(range(side), range(left, left+side)):
                    crop[col][row] = prop_tl*img[y+1][x]+prop_tr*img[y+1][x+1]+prop_bl*img[y][x]+prop_br*img[y][x+1]  # 'zeros' was undefined; write into the crop buffer
crops.append(img[bottom:top,left:right])
if resize_crop_for_reconstruction:
return self.correct_cropped_stack_size(crops)
else:
return crops
def correct_cropped_stack_size(self, stack):
crop_side = min(int(np.log2(stack[0].shape[0]))**2, int(np.log2(self.reconstruction_side))**2)
        cropped_stack = self.resize_stack(stack, crop_side)  # resize the stack passed in; self.cropped_stack is not set yet
return cropped_stack
@staticmethod
def resize_stack(stack, side):
return [cv2.resize(img, (side, side)) for img in stack]
def rel_pos_estimate(self, method="phase_corr", stack=None, rel_to_top_left=True):
if not stack:
stack = self.stack
rel_pos = []
if method == "phase_corr":
for i in range(1, len(self.stack)):
rel_pos.append(cv2.phaseCorrelate(self.stack[i-1], self.stack[i]))
if rel_to_top_left:
#chain relative positions from the centermost and find the position closest to the mean
pos = [[0., 0.]]*len(rel_pos)
for i, dx, dy in enumerate(rel_pos[1:], 1):
pos[i][0] = pos[i-1][0]+ rel_pos[i][0]
pos[i][1] = pos[i-1][1]+ rel_pos[i][1]
mean = [0., 0.]
for i in range(len(pos)):
mean[0] += pos[i][0]
mean[1] += pos[i][1]
mean[0] /= len(pos)
mean[1] /= len(pos)
dists = [(x-mean[0])**2+(y-mean[1])**2 for x, y in pos]
idx = dists.index(min(dists))
half_side = self.stack_side/2
return [(half_side+mean[0]-x, half_side+mean[1]-y) for x, y in pos]
else:
return rel_pos
@staticmethod
def calc_transfer_func(side, wavelength, defocus_change, pad_periods = 0, spher_aber_coeff=None,
aperture_mask=None):
px_dim = 1.+pad_periods
ctf_coeff = np.pi * wavelength * defocus_change
rec_px_width = 1.0 / (side*px_dim)
rec_origin = -1.0 / (2.0*px_dim)
rec_x_dist = rec_origin + rec_px_width * af.range(side, side, dim=0)
rec_y_dist = rec_origin + rec_px_width * af.range(side, side, dim=1)
rec_dist2 = rec_x_dist*rec_x_dist + rec_y_dist*rec_y_dist
ctf_phase = ctf_coeff*rec_dist2
if spher_aber_coeff:
ctf_phase += 0.5 * np.pi * wavelength**3 * spher_aber_coeff * rec_dist2**2
ctf = af.cos(ctf_phase) + complex(0, 1)*af.sin(ctf_phase)
if aperture_mask:
ctf *= aperture_mask
return ctf.as_type(af.Dtype.c32)
def fft_to_diff(self, fft):
return self.fft_shift(fft)
def diff_to_fft(self, diff):
return self.fft_to_diff(diff)
def propagate_wave(self, img, ctf):
fft = self.af_padded_fft2(img, self.pad_value, self.pad_periods)
ctf = self.diff_to_fft(ctf)
propagation = self.af_unpadded_ifft2(fft*ctf, self.pad_periods)
return propagation
    def propagate_to_focus(self, img, defocus, wavelength, pad_periods=0):
        # instance method so it can reach calc_transfer_func and propagate_wave
        ctf = self.calc_transfer_func(
            side=int(img.dims()[0]*(1+pad_periods)),
            wavelength=wavelength,
            defocus_change=-defocus,
            pad_periods=pad_periods)
        return self.propagate_wave(img, ctf)
    def propagate_back_to_defocus(self, exit_wave, defocus, wavelength, pad_periods=0):
        ctf = self.calc_transfer_func(
            side=int(exit_wave.dims()[0]*(1+pad_periods)),  # 'img' was undefined and the '*' was missing
            wavelength=wavelength,
            defocus_change=defocus,
            pad_periods=pad_periods)
        return self.propagate_wave(exit_wave, ctf)
    def reconstruct(self, stack=None, defocuses=None, num_iter = 50, stack_on_gpu=False):
        """GPU accelerate wavefunction reconstruction and mse calculation"""
        # instance method (the original @staticmethod referenced self); defaults to the cropped stack
        if stack is None:
            stack = self.cropped_stack
        stack_gpu = stack if stack_on_gpu else [self.np_to_af(img) for img in stack]
        defocuses = defocuses if defocuses else self.defocuses
width = stack[0].shape[0]
height = stack[0].shape[1]
exit_wave = af.constant(0, width, height)
for i in range(num_iter):
if self.display_iter_nums:
print("Iteration {0} of {1}".format(i+1, num_iter))
exit_wave = 0
for img, idx in zip(stack_gpu, range(len(stack_gpu))):
#print("Propagation {0} of {1}".format(idx+1, len(stack)))
exit_wave += self.propagate_to_focus(img, defocuses[idx], self.wavelength)
exit_wave /= len(stack)
for idx in range(len(stack)):
amp = af.abs(stack_gpu[idx])
stack_gpu[idx] = self.propagate_back_to_defocus(exit_wave, defocuses[idx], self.wavelength)
stack_gpu[idx] = (amp / af.abs(stack_gpu[idx])) * stack_gpu[idx]
return exit_wave
def reconstruction_loss(self, stack_gpu, defocus_incr, defocus_ramp):
        # defocus_incr is a single scalar increment scaled along the focal-series ramp
        defocuses = [defocus_incr*ramp for ramp in defocus_ramp]
        reconstruction = self.reconstruct(stack_gpu.copy(), defocuses, stack_on_gpu=True)
#Use the wavefunction to recreate the original images
deconstruction = [self.propagate_back_to_defocus(reconstruction, defocus, self.wavelength) \
for defocus in defocuses]
losses = [0.]*len(stack_gpu)
for i in range(len(losses)):
collapse = af.abs(deconstruction[i])**2
collapse *= af.mean(stack_gpu[i]) / af.mean(collapse)
losses[i] = af.mean((stack_gpu[i]-collapse)**2)
return np.max(losses)
def defocus_initial_estimate(self):
#Try various defocuses until one is found that matches the expected pattern
stack = self.cropped_stack
if self.series_type == "linear":
gen = lambda x: x
elif self.series_type == "quadratic":
gen = lambda x: x**2
elif self.series_type == "cubic":
gen = lambda x: x**3
mid = (self.series_mid if self.series_mid else len(stack) // 2) if self.series_alternating else 0
defocus_dir = 1.0 if self.series_increasing else -1.0
side = stack[0].shape[0]
        stack_gpu = [self.np_to_af(img, af.Dtype.c32) for img in stack]
        # the attributes set in __init__ are initial_defocus_sweep_num and defocus_search_range
        search_ramp = [(2**x / 2**self.initial_defocus_sweep_num) - 1 for x in range(0, self.initial_defocus_sweep_num)]
        m = self.defocus_search_range[1]-self.defocus_search_range[0]
        c = self.defocus_search_range[0]
defocus_incr = [m*x+c for x in search_ramp]
defocus_ramp = [defocus_dir*np.sign(x-mid)*gen(x-mid) for x in range(len(stack_gpu))]
losses = [self.reconstruction_loss(stack_gpu, incr, defocus_ramp) for incr in defocus_incr]
        #Get the highest loss neighbouring the highest and refine using bilinear interpolation
        idx = losses.index(max(losses))
if idx == 0:
idx1, idx2 = idx, 0
elif idx == self.initial_defocus_sweep_num-1:
idx1, idx2 = 0, idx
else:
            idx1, idx2 = (idx, idx+1) if losses[idx-1] < losses[idx+1] else (idx-1, idx)
losses = [losses[idx]]
incr1 = defocus_incr[idx1]
incr2 = defocus_incr[idx2]
        if "gradient_plateau" in self.defocus_search_criteria:  # the criteria are stored as a list
def condition(losses):
if len(losses) == 1:
return True
else:
return losses[-1] < losses[-2]
while True:
incr = 0.5*(incr1+incr2)
losses.append(self.reconstruction_loss(stack_gpu, incr, defocus_ramp))
if condition(losses):
incr1, incr2 = incr2, incr
else:
return incr2
def reconstruction_loss_arbitrary_params(self, centres, defocuses):
stack = self.crop_stack(centres)
        stack_gpu = [self.np_to_af(img, af.Dtype.c32) for img in stack]
        reconstruction = self.reconstruct(stack_gpu.copy(), defocuses, stack_on_gpu=True)
        # mirror reconstruction_loss: propagate the exit wave back to each defocus before comparing
        deconstruction = [self.propagate_back_to_defocus(reconstruction, defocus, self.wavelength) \
            for defocus in defocuses]
losses = [0.]*len(stack_gpu)
for i in range(len(losses)):
collapse = af.abs(deconstruction[i])**2
collapse *= af.mean(stack_gpu[i]) / af.mean(collapse)
losses[i] = af.mean((stack_gpu[i]-collapse)**2)
return np.max(losses)
def refine_params(self):
x0 = [x for x, _ in self.rel_pos] + [y for _, y in self.rel_pos] + self.defocuses
        def loss(x):
            n_imgs = len(x) // 3  # x packs [x coords, y coords, defocuses]; 'len = len(x)' shadowed the builtin
            centres = [[0., 0.] for _ in range(n_imgs)]
            for i in range(n_imgs):
                centres[i][0] = x[i]
                centres[i][1] = x[i + n_imgs]
            return self.reconstruction_loss_arbitrary_params(centres, x[2*n_imgs:])
refinement = minimize(
loss,
x0,
method='trust-krylov',
tol=1e-6,
            options={'maxiter': 100})  # scipy.optimize.minimize has no 'iter' keyword
x = refinement.x
        n_imgs = len(x) // 3
        centres = [[0., 0.] for _ in range(n_imgs)]
        for i in range(n_imgs):
            centres[i][0] = x[i]
            centres[i][1] = x[i + n_imgs]
        return centres, x[2*n_imgs:]
if __name__ == "__main__":
ewrec = EWREC(
stack_dir="E:/dump/stack1/",
wavelength=2.51e-12,
series_type = "quadratic",
series_middle=6,
series_increasing=True,
reconstruction_side=512)
| [
"[email protected]"
]
| |
d4ef79f1d42135b241425cfb23eada729d85805d | 420f974d85376031e66bb7241caedee1675b93ec | /init.py | a071836a49381b59b0ae48ee879ae0dacc8fbade | []
| no_license | uiandwe/chatting | 060c8b513ecd53db9519c97f99198c09cc918e0a | e8430cf4db173d44ee37601b96a8028271000cd1 | refs/heads/master | 2020-04-01T23:33:02.324646 | 2016-06-29T02:26:53 | 2016-06-29T02:26:53 | 62,188,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | __author__ = 'hyeonsj'
# db
host = '127.0.0.1'
user = 'root'
passwd = 'spoqa'
db = 'spoqa'
charset = 'utf8'
# logging level
# debug 10
# warning 30
# error 40
log_level = 10 | [
"[email protected]"
]
| |
7b13f2453af39f2d8ce8980fb548903267988fb9 | e47d5da2a947c3b3a834817d0b084ee65d302067 | /atcoder.jp/aising2020/aising2020_b/Main.py | 066248010306017828be4a1ada26949f6befc4c7 | []
| no_license | aki-nlp/AtCoder | 3293b9b183c0a8cefbf20d7f4f491c6f1e7604b8 | 9385805cbb1fa158f6d3c4a2415cdf7ba94547e5 | refs/heads/master | 2023-02-25T06:04:10.913237 | 2020-10-03T12:02:00 | 2020-10-03T12:02:00 | 296,792,313 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | def main():
n = int(input())
a = list(map(int, input().split()))
a = a[::2]
ans = 0
for aa in a:
if aa%2 == 1:
ans += 1
print(ans)
if __name__ == '__main__':
main() | [
"[email protected]"
]
| |
5f0e5ecf4312f6a94fb5df3eca0368782d2e1f45 | 69889d51e933b4e8a1d4c8397a317aa1d1365a5a | /String/KMP/13506.py | ace84b813e5a35c288b459bc91aa9047c3fb07b6 | []
| no_license | ddraa/Algorithm | a35c87631420ceccec6f7094da6f2b22ddb66c8c | a97c6628d5389f7f93603a2e95ac3b569057f556 | refs/heads/master | 2023-06-25T17:12:39.925821 | 2021-07-18T05:53:28 | 2021-07-18T05:53:28 | 279,240,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | import sys
def LIS(P):
lp = len(P)
Table = [0] * lp
i = 0
for j in range(1, lp):
while i > 0 and P[i] != P[j]:
i = Table[i - 1]
if P[i] == P[j]:
i += 1
Table[j] = i
return Table
print(LIS("papapapap")) | [
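# Despite its name, LIS builds the KMP failure (prefix) table: Table[j] is the length of the
# longest proper prefix of P[:j+1] that is also a suffix of it.
# Expected output for "papapapap": [0, 0, 1, 2, 3, 4, 5, 6, 7]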
"[email protected]"
]
| |
d3a6aa42166b4d18271f903f734bb3137b484836 | 0ec0fa7a6dc0659cc26113e3ac734434b2b771f2 | /4.refactored/log/2016-11-21@09:03/minibatch.py | 81fc07180a820f169d2b248b9cd4647a948aba64 | []
| no_license | goldleaf3i/3dlayout | b8c1ab3a21da9129829e70ae8a95eddccbf77e2f | 1afd3a94a6cb972d5d92fe373960bd84f258ccfe | refs/heads/master | 2021-01-23T07:37:54.396115 | 2017-03-28T10:41:06 | 2017-03-28T10:41:06 | 86,431,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,935 | py | from __future__ import division
import datetime as dt
import numpy as np
import util.layout as lay
import util.GrafoTopologico as gtop
import util.transitional_kernels as tk
import util.MappaSemantica as sema
import util.frontiere as fr
from object import Segmento as sg
from util import pickle_util as pk
from util import accuracy as ac
from util import layout as lay
from util import disegna as dsg
from util import predizionePlan_geometriche as pgeom
from object import Superficie as fc
from object import Spazio as sp
from object import Plan as plan
from util import MCMC as mcmc
from util import valutazione as val
from shapely.geometry import Polygon
import parameters as par
import pickle
import os
import glob
import shutil
import time
import cv2
import warnings
warnings.warn("Set the lateralLine and cvThresh parameters")
def start_main(parametri_obj, path_obj):
#----------------------------1.0_LAYOUT DELLE STANZE----------------------------------
#------inizio layout
#leggo l'immagine originale in scala di grigio e la sistemo con il thresholding
img_rgb = cv2.imread(path_obj.metricMap)
img_ini = img_rgb.copy() #copio l'immagine
# 127 per alcuni dati, 255 per altri
ret,thresh1 = cv2.threshold(img_rgb,parametri_obj.cv2thresh,255,cv2.THRESH_BINARY)#prova
#------------------1.1_CANNY E HOUGH PER TROVARE MURI---------------------------------
walls , canny = lay.start_canny_ed_hough(thresh1,parametri_obj)
print len(walls)
#walls , canny = lay.start_canny_ed_hough(img_rgb,parametri_obj)
if par.DISEGNA:
#disegna mappa iniziale, canny ed hough
dsg.disegna_map(img_rgb,filepath = path_obj.filepath, format='png')
dsg.disegna_canny(canny,filepath = path_obj.filepath, format='png')
dsg.disegna_hough(img_rgb,walls,filepath = path_obj.filepath, format='png')
lines = lay.flip_lines(walls, img_rgb.shape[0]-1)
walls = lay.crea_muri(lines)
print "lines", len(lines), len(walls)
if par.DISEGNA:
#disegno linee
dsg.disegna_segmenti(walls, format='png')#solo un disegno poi lo elimino
#------------1.2_SETTO XMIN YMIN XMAX YMAX DI walls-----------------------------------
#tra tutti i punti dei muri trova l'ascissa e l'ordinata minima e massima.
estremi = sg.trova_estremi(walls)
xmin = estremi[0]
xmax = estremi[1]
ymin = estremi[2]
ymax = estremi[3]
offset = 20
xmin -= offset
xmax += offset
ymin -= offset
ymax += offset
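    # pad the bounding box by 20 px on every side; these extents are later used as the
    # clipping range for the extended wall lines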
#-------------------------------------------------------------------------------------
#---------------1.3_CONTORNO ESTERNO--------------------------------------------------
#(contours, vertici) = lay.contorno_esterno(img_rgb, parametri_obj, path_obj)
(contours, vertici) = lay.contorno_esterno_versione_tre(img_rgb)
if par.DISEGNA:
dsg.disegna_contorno(vertici,xmin,ymin,xmax,ymax,filepath = path_obj.filepath, format='png')
#-------------------------------------------------------------------------------------
#---------------1.4_MEAN SHIFT PER TROVARE CLUSTER ANGOLARI---------------------------
(indici, walls, cluster_angolari) = lay.cluster_ang(parametri_obj.h, parametri_obj.minOffset, walls, diagonali= parametri_obj.diagonali)
if par.DISEGNA:
#dsg.disegna_cluster_angolari(walls, cluster_angolari, filepath = path_obj.filepath,savename = '5b_cluster_angolari')
dsg.disegna_cluster_angolari_corretto(walls, cluster_angolari, filepath = path_obj.filepath,savename = '5b_cluster_angolari',format='png')
#-------------------------------------------------------------------------------------
#---------------1.5_CLUSTER SPAZIALI--------------------------------------------------
#questo metodo e' sbagliato, fai quella cosa con il hierarchical clustering per classificarli meglio.e trovare in sostanza un muro
#cluster_spaziali = lay.cluster_spaz(parametri_obj.minLateralSeparation, walls)
#inserisci qui il nuovo Cluster_spaz
nuovo_clustering = 2 #1 metodo di matteo, 2 mio
#in walls ci sono tutti i segmenti
if nuovo_clustering == 1:
cluster_spaziali = lay.cluster_spaz(parametri_obj.minLateralSeparation, walls)#metodo di matteo
elif nuovo_clustering ==2:
cluster_mura = lay.get_cluster_mura(walls, cluster_angolari, parametri_obj)#metodo di valerio
cluster_mura_senza_outliers = []
for c in cluster_mura:
if c!=-1:
cluster_mura_senza_outliers.append(c)
# ottengo gli outliers
# outliers = []
# for s in walls:
# if s.cluster_muro == -1:
# outliers.append(s)
# dsg.disegna_segmenti(outliers, savename = "outliers")
#ora che ho un insieme di cluster relativi ai muri voglio andare ad unire quelli molto vicini
#ottengo i rappresentanti dei cluster (tutti tranne gli outliers)
#segmenti_rappresentanti = lay.get_rappresentanti(walls, cluster_mura)
segmenti_rappresentanti = lay.get_rappresentanti(walls, cluster_mura_senza_outliers)
if par.DISEGNA:
dsg.disegna_segmenti(segmenti_rappresentanti,filepath = path_obj.filepath, savename = "5c_segmenti_rappresentanti", format='png')
#classifico i rappresentanti
#qui va settata la soglia con cui voglio separare i cluster muro
#segmenti_rappresentanti = segmenti_rappresentanti
segmenti_rappresentanti = sg.spatialClustering(parametri_obj.sogliaLateraleClusterMura, segmenti_rappresentanti)
#in questo momento ho un insieme di segmenti rappresentanti che hanno il cluster_spaziale settato correttamente, ora setto anche gli altri che hanno lo stesso cluster muro
cluster_spaziali = lay.new_cluster_spaziale(walls, segmenti_rappresentanti, parametri_obj)
if par.DISEGNA:
dsg.disegna_cluster_spaziali(cluster_spaziali, walls,filepath = path_obj.filepath, format='png')
dsg.disegna_cluster_mura(cluster_mura, walls,filepath = path_obj.filepath, savename= '5d_cluster_mura', format='png')
#-------------------------------------------------------------------------------------
#-------------------1.6_CREO EXTENDED_LINES-------------------------------------------
(extended_lines, extended_segments) = lay.extend_line(cluster_spaziali, walls, xmin, xmax, ymin, ymax,filepath = path_obj.filepath)
if par.DISEGNA:
dsg.disegna_extended_segments(extended_segments, walls,filepath = path_obj.filepath, format='png')
#-------------------------------------------------------------------------------------
#-------------1.7_CREO GLI EDGES TRAMITE INTERSEZIONI TRA EXTENDED_LINES--------------
edges = sg.crea_edges(extended_segments)
#-------------------------------------------------------------------------------------
#----------------------1.8_SETTO PESI DEGLI EDGES-------------------------------------
edges = sg.setPeso(edges, walls)
#-------------------------------------------------------------------------------------
#----------------1.9_CREO LE CELLE DAGLI EDGES----------------------------------------
celle = fc.crea_celle(edges)
#-------------------------------------------------------------------------------------
#----------------CLASSIFICO CELLE-----------------------------------------------------
global centroid
#verificare funzioni
if par.metodo_classificazione_celle ==1:
print "1.metodo di classificazione ", par.metodo_classificazione_celle
(celle, celle_out, celle_poligoni, indici, celle_parziali, contorno, centroid, punti) = lay.classificazione_superfici(vertici, celle)
elif par.metodo_classificazione_celle==2:
print "2.metodo di classificazione ", par.metodo_classificazione_celle
#sto classificando le celle con il metodo delle percentuali
(celle_out, celle, centroid, punti,celle_poligoni, indici, celle_parziali) = lay.classifica_celle_con_percentuale(vertici, celle, img_ini)
#-------------------------------------------------------------------------------------
#--------------------------POLIGONI CELLE---------------------------------------------
(celle_poligoni, out_poligoni, parz_poligoni, centroid) = lay.crea_poligoni_da_celle(celle, celle_out, celle_parziali)
#ora vorrei togliere le celle che non hanno senso, come ad esempio corridoi strettissimi, il problema e' che lo vorrei integrare con la stanza piu' vicina ma per ora le elimino soltanto
#RICORDA: stai pensando solo a celle_poligoni
#TODO: questo metodo non funziona benissimo(sbagli ad eliminare le celle)
#celle_poligoni, celle = lay.elimina_celle_insensate(celle_poligoni,celle, parametri_obj)#elimino tutte le celle che hanno una forma strana e che non ha senso siano stanze
#-------------------------------------------------------------------------------------
#------------------CREO LE MATRICI L, D, D^-1, ED M = D^-1 * L------------------------
(matrice_l, matrice_d, matrice_d_inv, X) = lay.crea_matrici(celle, sigma = parametri_obj.sigma)
#-------------------------------------------------------------------------------------
#----------------DBSCAN PER TROVARE CELLE NELLA STESSA STANZA-------------------------
clustersCelle = lay.DB_scan(parametri_obj.eps, parametri_obj.minPts, X, celle_poligoni)
#questo va disegnato per forza perche' restituisce la lista dei colori
if par.DISEGNA:
colori, fig, ax = dsg.disegna_dbscan(clustersCelle, celle, celle_poligoni, xmin, ymin, xmax, ymax, edges, contours,filepath = path_obj.filepath, format='png')
else:
colori = dsg.get_colors(clustersCelle, format='png')
#-------------------------------------------------------------------------------------
#------------------POLIGONI STANZE(spazio)--------------------------------------------
stanze, spazi = lay.crea_spazio(clustersCelle, celle, celle_poligoni, colori, xmin, ymin, xmax, ymax, filepath = path_obj.filepath)
if par.DISEGNA:
dsg.disegna_stanze(stanze, colori, xmin, ymin, xmax, ymax,filepath = path_obj.filepath, format='png')
#-------------------------------------------------------------------------------------
#cerco le celle parziali
coordinate_bordi = [xmin, ymin, xmax, ymax]
celle_parziali, parz_poligoni = lay.get_celle_parziali(celle, celle_out, coordinate_bordi)#TODO: non ho controllato bene ma mi pare che questa cosa possa essere inserita nel metodo 1 che crca le celle parziali
#creo i poligoni relativi alle celle_out
out_poligoni = lay.get_poligoni_out(celle_out)
# TODO: questo blocco e' da eliminare, mi serviva solo per risolvere un bug
# l = []
# for i,p in enumerate(out_poligoni):
# l.append(i)
# col_prova = dsg.get_colors(l)
# dsg.disegna_stanze(out_poligoni, col_prova, xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename='0a_prova')
# exit()
#
#--------------------------------fine layout------------------------------------------
#------------------------------GRAFO TOPOLOGICO---------------------------------------
#costruisco il grafo
(stanze_collegate, doorsVertices, distanceMap, points, b3) = gtop.get_grafo(path_obj.metricMap, stanze, estremi, colori, parametri_obj)
(G, pos) = gtop.crea_grafo(stanze, stanze_collegate, estremi, colori)
#ottengo tutte quelle stanze che non sono collegate direttamente ad un'altra, con molta probabilita' quelle non sono stanze reali
stanze_non_collegate = gtop.get_stanze_non_collegate(stanze, stanze_collegate)
#ottengo le stanze reali, senza tutte quelle non collegate
stanze_reali, colori_reali = lay.get_stanze_reali(stanze, stanze_non_collegate, colori)
if par.DISEGNA:
#sto disegnando usando la lista di colori originale, se voglio la lista della stessa lunghezza sostituire colori con colori_reali
dsg.disegna_stanze(stanze_reali, colori_reali, xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '8_Stanze_reali', format='png')
#------------------------------------------------------------------------------------
if par.DISEGNA:
dsg.disegna_distance_transform(distanceMap, filepath = path_obj.filepath, format='png')
dsg.disegna_medial_axis(points, b3, filepath = path_obj.filepath, format='png')
dsg.plot_nodi_e_stanze(colori,estremi, G, pos, stanze, stanze_collegate, filepath = path_obj.filepath, format='png')
#-----------------------------fine GrafoTopologico------------------------------------
#-------------------------------------------------------------------------------------
    #FROM HERE THE NEW PART STARTS
    #IDEA:
    #1) find the partial cells (a space is partial if at least one of its cells is partial) and create the Plan object
    #2) postprocessing to decide whether the "out" cells are really out
    #3) postprocessing to merge the spaces that should be merged
    #create the Plan object that holds all the spaces; every room contains all of its cells, marked as out, partial or internal.
    #mark the spaces as out if they are not connected to anything.
spazi = sp.get_spazi_reali(spazi, stanze_reali) #elimino dalla lista di oggetti spazio quegli spazi che non sono collegati a nulla.
#---------------------------trovo le cellette parziali--------------------------------
#se voglio il metodo che controlla le celle metto 1,
#se voglio il confronto di un intera stanza con l'esterno metto 2
#se volgio il confronto di una stanza con quelli che sono i pixel classificati nella frontiera metto 3
trova_parziali=3
if par.mappa_completa ==False and trova_parziali==1:
#QUESTO METODO OGNI TANTO SBAGLIA PER VIA DELLA COPERTURA DEI SEGMANTI, verifico gli errori con il postprocessing per le stanze parziali.
#TODO: Questo deve essere fatto solo se sono in presenza di mappe parziali
sp.set_cellette_parziali(spazi, parz_poligoni)#trovo le cellette di uno spazio che sono parziali
spazi = sp.trova_spazi_parziali(spazi)#se c'e' almeno una celletta all'interno di uno spazio che e' parziale, allora lo e' tutto lo spazio.
#creo l'oggetto Plan
#faccio diventare la lista di out_poligoni delle cellette
cellette_out = []
for p,c in zip(out_poligoni, celle_out):
celletta = sp.Celletta(p,c)
celletta.set_celletta_out(True)
cellette_out.append(celletta)
plan_o = plan.Plan(spazi, contorno, cellette_out) #spazio = oggetto Spazio. contorno = oggetto Polygon, cellette_out = lista di Cellette
dsg.disegna_spazi(spazi, colori, xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13_spazi', format='png')
if par.mappa_completa ==False and trova_parziali==2:
#secondo metodo per trovare gli spazi parziali. Fa una media pesata. migliore rispetto al primo ma bisogna fare tuning del parametro
plan.trova_spazi_parziali_due(plan_o)
if par.mappa_completa == False and trova_parziali==3:
#terzo metodo per trovare le celle parziali basato sulla ricerca delle frontiere.
immagine_cluster, frontiere, labels, lista_pixel_frontiere = fr.ottieni_frontire_principali(img_ini)
if len(labels) > 0:
plan.trova_spazi_parziali_da_frontiere(plan_o, lista_pixel_frontiere, immagine_cluster, labels)
spazi = sp.trova_spazi_parziali(plan_o.spazi)
if par.DISEGNA:
dsg.disegna_map(immagine_cluster,filepath = path_obj.filepath, savename = '0a_frontiere', format='png')
#-------------------------------------------------------------------------------------
#-----------------------------calcolo peso per extended_segments----------------------
#calcolo il peso di un extended segment in base alla copertura sei segmenti. Ovviamente non potra' mai essere 100%.
extended_segments = sg.setPeso(extended_segments, walls)#TODO:controllare che sia realmente corretto
#calcolo per ogni extended segment quante sono le stanze che tocca(la copertura)
lay.calcola_copertura_extended_segment(extended_segments, plan_o.spazi)
plan_o.set_extended_segments(extended_segments)
#-------------------------------------------------------------------------------------
#---------------------------unisco spazi oversegmentati ------------------------------
#unisco le spazi che sono state divisi erroneamente
#fa schifissimo come metodo(nel caso lo utilizziamo per MCMCs)
uniciStanzeOversegmentate = 2
#1) primo controlla cella per cella
#2) unisce facendo una media pesata
#3) non unisce le stanze, non fa assolutamente nulla, usato per mappe parziali se non voglio unire stanze
if uniciStanzeOversegmentate ==1:
#fa schifissimo come metodo(nel caso lo utilizziamo per MCMCs)
#unione stanze
#provo ad usare la distance transforme
#dsg.disegna_distance_transform_e_stanze(distanceMap,stanze,colori, filepath = path_obj.filepath, savename = 'distance_and_stanze')
#se esistono due spazi che sono collegati tramite un edge di una cella che ha un peso basso allora unisco quegli spazi
plan.unisci_stanze_oversegmentate(plan_o)
#cambio anche i colori
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13b_spazi_nuovo', format='png')
elif uniciStanzeOversegmentate == 2:
#TODO: questo metodo funziona meglio del primo, vedere se vale la pena cancellare il primo
#metodo molto simile a quello di Mura per il postprocessing
plan.postprocessing(plan_o, parametri_obj)
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13b_spazi_nuovo', format='png')
else:
#se non voglio unire le stanze, ad esempio e' utile quando sto guardando le mappe parziali
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13b_spazi_nuovo', format='png')
#-------------------------------------------------------------------------------------
#------------------------------PREDIZIONE GEOMETRICA----------------------------------
#da qui comincia la parte di predizione, io la sposterei in un altro file
#ricavo gli spazi parziali
cellette_out = plan_o.cellette_esterne
spazi_parziali = []
for s in plan_o.spazi:
if s.parziale == True:
spazi_parziali.append(s)
import copy
plan_o_2 = copy.deepcopy(plan_o)#copio l'oggetto per poter eseguire le azioni separatamente
plan_o_3 = copy.deepcopy(plan_o)
#metodo di predizione scelto.
#se MCMC == True si vuole predirre con il MCMC, altrimenti si fanno azioni geometriche molto semplici
if par.MCMC ==True:
# TODO:da eliminare, mi serviva solo per delle immagini e per controllare di aver fatto tutto giusto
#TODO: MCMC rendilo una funzione privata o di un altro modulo, che se continui a fare roba qua dentro non ci capisci piu' nulla.
#guardo quali sono gli extended che sto selezionando
for index,s in enumerate(spazi_parziali):
celle_di_altre_stanze = []
for s2 in plan_o.spazi:
if s2 !=s:
for c in s2.cells:
celle_di_altre_stanze.append(c)
#-----non serve(*)
celle_circostanti = celle_di_altre_stanze + cellette_out #creo una lista delle celle circostanti ad una stanza
a = sp.estrai_extended_da_spazio(s, plan_o.extended_segments, celle_circostanti)
tot_segment = list(set(a))
#dsg.disegna_extended_segments(tot_segment, walls,filepath = path_obj.filepath, format='png', savename = '7a_extended'+str(index))
#extended visti di una stanza parziale.
b= sp.estrai_solo_extended_visti(s, plan_o.extended_segments, celle_circostanti)#estraggo solo le extended sicuramente viste
tot_segment_visti = list(set(b))
#dsg.disegna_extended_segments(tot_segment_visti, walls,filepath = path_obj.filepath, format='png', savename = '7b_extended'+str(index))
#-----fine(*)
#computo MCMC sulla stanza in considerazione
mcmc.computa_MCMC(s, plan_o, celle_di_altre_stanze, index, xmin, ymin, xmax, ymax, path_obj)
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '14_MCMC', format='png')
if par.azione_complessa == True:
#1) FACCIO AZIONE SEMPLICE PER AGGIUNGERE CELLE VISTE DAL LASER
#2) FACCIO AZIONE COMPLESSA: nel quale vado a creare l'intero spazio degli stati fino ad una certa iterazione.
#-------------------------------AZIONE GEOMETRICA 1)----------------------------------
#-----AGGIUNGO CELLE OUT A CELLE PARZIALI SOLO SE QUESTE CELLE OUT SONO STATE TOCCANTE DAL BEAM DEL LASER
for s in spazi_parziali:
celle_confinanti = pgeom.estrai_celle_confinanti_alle_parziali(plan_o, s)#estraggo le celle confinanti alle celle interne parziali delle stanze parziali.
print "le celle confinanti sono: ", len(celle_confinanti)
#unisco solo se le celle sono state toccate dal beam del laser
celle_confinanti = plan.trova_celle_toccate_dal_laser_beam(celle_confinanti, immagine_cluster)
#delle celle confinanti non devo unire quelle che farebbero sparire una parete.
celle_confinanti = pgeom.elimina_celle_con_parete_vista(celle_confinanti, s)
#faccio una prova per unire una cella che e' toccata dal beam del laser.
if len(celle_confinanti)>0:
#unisco la cella allo spazio
for cella in celle_confinanti:
if cella.vedo_frontiera == True:
sp.aggiungi_cella_a_spazio(s, cella, plan_o)
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13c_azione_geom_1', format='png')
#-----------------------------AZIONE COMPLESSA--------------------------------
for index,s in enumerate(spazi_parziali):
#estraggo le celle delle altre stanze
celle_di_altre_stanze = plan.estrai_celle_di_altre_stanze(s,plan_o)
#creo il mio spazio degli stati
level= 1 #questa e la profondita' con la quale faccio la mia ricerca, oltre al secondo livello non vado a ricercare le celle.
elementi = pgeom.estrai_spazio_delle_celle(s, plan_o, level)
elementi = pgeom.elimina_spazi_sul_bordo_da_candidati(elementi, plan_o) #per ora non considero elementi che toccano il bordo, perchs' tanto non voglio aggiungerli e mi ingrandiscono lo spazio degli stati per nulla.
print "gli elementi sono:", len(elementi)
print "-------inizio calcolo permutazioni-------"
permutazioni = pgeom.possibili_permutazioni(elementi)
print "-------fine calcolo permutazioni-------"
print "il numero di permutazioni sono:", len(permutazioni)
if len(permutazioni)>0:
#per ogni permutazione degli elementi devo controllare il costo che avrebbe il layout con l'aggiunta di tutte le celle di quella permutazione.
permutazioni_corrette = []
score_permutazioni_corrette = []
for indice,permutazione in enumerate(permutazioni):
ok=False
pgeom.aggiunge_celle_permutazione(permutazione, plan_o, s)#aggiungo le celle della permutazione corrente alla stanza
#calcolo penalita'
penal1_dopo = val.penalita1(s)#piu' questo valore e' alto peggio e', valori prossimi allo zero indicano frome convesse.
penal4_dopo = val.penalita4(s, plan_o, celle_di_altre_stanze)#conto il numero di extended che ci sono dopo aver aggiungere la permutazione, sfavorisce i gradini
# il risultato potrebbe portare ad una stanza non Polygon, allora quella permutazione non e' valida
if type(s.spazio)== Polygon:
ok = True
permutazioni_corrette.append(permutazione)
#elimino dalla lista delle permutazioni tutte quelle permutazioni che hanno gli stessi elementi
for p in permutazioni:
vuoto= list(set(p)-set(permutazione))
if len(vuoto)==0 and len(p)== len(permutazione) and p!= permutazione:
permutazioni.remove(p)
#------------valuto il layout con permutazione aggiunta---------------
score = val.score_function(penal1_dopo, penal4_dopo)#non ancora implementata fino alla fine
score_permutazioni_corrette.append(score)
#----------------------fine valutazione-----------------------------------
#disegno
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = 'permutazioni/14_stanza'+str(index)+'permutazioni_'+str(indice)+'_a', format='png')#TODO:DECOMMENTA SE NON SEI IN BATCH
else:
#elimina la permutazione perche' non e' valida
permutazioni.remove(permutazione)
#------
pgeom.elimina_celle_permutazione(permutazione, plan_o, s)
if ok ==True:
a=0
#dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = 'permutazioni/14_stanza'+str(index)+'permutazioni_'+str(indice)+'_b', format='png')#TODO:DECOMMENTA SE NON SEI IN BATCH
#------
print "permutazione", indice
#valuto la permutazione che mi permette di minimizzare lo score
if len(score_permutazioni_corrette)>0:
min_score = np.amin(score_permutazioni_corrette)
print "min_core", min_score
posizione_permutazione = score_permutazioni_corrette.index(min_score)
permutazione_migliore = permutazioni_corrette[posizione_permutazione]
#ottenuto lo score migliore lo confronto con lo score del layout originale e guardo quale a' migliore
#calcolo score del layout originale, senza previsioni
penal1_prima = val.penalita1(s)#piu' questo valore e' alto peggio e', valori prossimi allo zero indicano frome convesse.
penal4_prima = val.penalita4(s, plan_o, celle_di_altre_stanze)#conto il numero di extended che ci sono prima di aggiungere la permutazione
score_originale = val.score_function(penal1_prima, penal4_prima)#non ancora implementata fino alla fine
print "score_originale", score_originale
if min_score<=score_originale:
#preferisco fare una previsione
permutazione_migliore = permutazione_migliore
pgeom.aggiunge_celle_permutazione(permutazione_migliore, plan_o, s)
else:
#il layout originale ottenuto e' migliore di tutti gli altri, non faccio nessuana previsione per la stanza corrente
pass
else:
#non ho trovato permutazioni che hanno senso, allora lascio tutto come e'
pass
#disegno le computazioni migliori TODO: momentaneo, solo perche' in questo momento uso solo la penalita' della convessita'
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '14_stanza'+str(index)+'azione_complessa', format='png')
#---------------------------FINE AZIONE COMPLESSA-----------------------------
# for r in permutazioni:
# print r
# print "\n\n"
#
# poligoni= []
# colori=[]
# for ele in elementi:
# poligoni.append(ele.cella)
# colori.append('#800000')
#
# dsg.disegna_stanze(poligoni,colori , xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '15_poligoni_esterni_stanza'+str(index), format='png')
#
#-----------------------------AZIONE COMPLESSA--------------------------------
#stampo il layout finale
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '15_azione_complessa', format='png')
if par.azioni_semplici==True:
#------------------------------AZIONE GEOMETRICA 1)+2)--------------------------------
#-------------------------------AZIONE GEOMETRICA 1)----------------------------------
#-----AGGIUNGO CELLE OUT A CELLE PARZIALI SOLO SE QUESTE CELLE OUT SONO STATE TOCCANTE DAL BEAM DEL LASER
celle_candidate = []
for s in spazi_parziali:
celle_confinanti = pgeom.estrai_celle_confinanti_alle_parziali(plan_o, s)#estraggo le celle confinanti alle celle interne parziali delle stanze parziali.
print "le celle confinanti sono: ", len(celle_confinanti)
#unisco solo se le celle sono state toccate dal beam del laser
celle_confinanti = plan.trova_celle_toccate_dal_laser_beam(celle_confinanti, immagine_cluster)
#delle celle confinanti non devo unire quelle che farebbero sparire una parete.
celle_confinanti = pgeom.elimina_celle_con_parete_vista(celle_confinanti, s)
#faccio una prova per unire una cella che e' toccata dal beam del laser.
if len(celle_confinanti)>0:
#unisco la cella allo spazio
for cella in celle_confinanti:
if cella.vedo_frontiera == True:
sp.aggiungi_cella_a_spazio(s, cella, plan_o)
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13c_azione_geom_1', format='png')
#-------------------------------AZIONE GEOMETRICA 2)-----------------------------------
#--UNISCO LE CELLE IN BASE ALLE PARETI CHE CONDIVIDONO CON ALTRE STANZE
for s in spazi_parziali:
#estraggo le celle out che confinano con le celle parziali
celle_confinanti = pgeom.estrai_celle_confinanti_alle_parziali(plan_o, s)#estraggo le celle confinanti alle celle interne parziali delle stanze parziali.
print "le celle confinanti sono: ", len(celle_confinanti)
#delle celle confinanti appena estratte devo prendere solamente quelle che hanno tutti i lati supportati da una extended line
celle_confinanti = pgeom.estrai_celle_supportate_da_extended_segmement(celle_confinanti, s, plan_o.extended_segments)
#delle celle confinanti non devo unire quelle che farebbero sparire una parete.
celle_confinanti = pgeom.elimina_celle_con_parete_vista(celle_confinanti, s)
#unisco solo quelle selezionate
#TODO questa parte e' da cancellare
if len(celle_confinanti)>0:
#unisco la cella allo spazio
for cella in celle_confinanti:
sp.aggiungi_cella_a_spazio(s, cella, plan_o)
dsg.disegna_spazi(plan_o.spazi, dsg.get_colors(plan_o.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13e_azione_geom_1_piu_geom_2', format='png')
#----------------------------------FINE 1)+2)-----------------------------------------
#----------------------------FACCIO SOLO AZIONE GEOM 2)-------------------------------
#questa azione la faccio su una copia di plan
#ricavo gli spazi parziali dalla copia di plan_o che sono esattamente una copia di spazi_parziali precedente.
cellette_out = plan_o_2.cellette_esterne
spazi_parziali = []
for s in plan_o_2.spazi:
if s.parziale == True:
spazi_parziali.append(s)
cella_prova =None#eli
spp = None#eli
for s in spazi_parziali:
#estraggo le celle out che confinano con le celle parziali
celle_confinanti = pgeom.estrai_celle_confinanti_alle_parziali(plan_o_2, s)#estraggo le celle confinanti alle celle interne parziali delle stanze parziali.
print "le celle confinanti sono: ", len(celle_confinanti)
#delle celle confinanti appena estratte devo prendere solamente quelle che hanno tutti i lati supportati da una extended line
celle_confinanti = pgeom.estrai_celle_supportate_da_extended_segmement(celle_confinanti, s, plan_o_2.extended_segments)
print "le celle confinanti sono2: ", len(celle_confinanti)
#delle celle confinanti non devo unire quelle che farebbero sparire una parete.
celle_confinanti = pgeom.elimina_celle_con_parete_vista(celle_confinanti, s)
print "le celle confinanti sono3: ", len(celle_confinanti)
#unisco solo quelle selezionate
#TODO questa parte e' da cancellare
if len(celle_confinanti)>0:
#unisco la cella allo spazio
for cella in celle_confinanti:
sp.aggiungi_cella_a_spazio(s, cella, plan_o_2)
cella_prova = cella#elimina
spp = s#elimina
dsg.disegna_spazi(plan_o_2.spazi, dsg.get_colors(plan_o_2.spazi), xmin, ymin, xmax, ymax,filepath = path_obj.filepath, savename = '13d_azione_geom_2', format='png')
#----------------------------------FINE SOLO AZIONE GEOM 2)--------------------------
#------------------------CREO PICKLE--------------------------------------------------
#creo i file pickle per il layout delle stanze
print("creo pickle layout")
pk.crea_pickle((stanze, clustersCelle, estremi, colori, spazi, stanze_reali, colori_reali), path_obj.filepath_pickle_layout)
print("ho finito di creare i pickle del layout")
#creo i file pickle per il grafo topologico
print("creo pickle grafoTopologico")
pk.crea_pickle((stanze, clustersCelle, estremi, colori), path_obj.filepath_pickle_grafoTopologico)
print("ho finito di creare i pickle del grafo topologico")
#-----------------------CALCOLO ACCURACY----------------------------------------------
#L'accuracy e' da controllare, secondo me non e' corretta.
if par.mappa_completa:
#funzione per calcolare accuracy fc e bc
print "Inizio a calcolare metriche"
results, stanze_gt = ac.calcola_accuracy(path_obj.nome_gt,estremi,stanze_reali, path_obj.metricMap,path_obj.filepath, parametri_obj.flip_dataset)
#results, stanze_gt = ac.calcola_accuracy(path_obj.nome_gt,estremi,stanze, path_obj.metricMap,path_obj.filepath, parametri_obj.flip_dataset)
if par.DISEGNA:
dsg.disegna_grafici_per_accuracy(stanze, stanze_gt, filepath = path_obj.filepath, format='png')
print "Fine calcolare metriche"
else:
#setto results a 0, giusto per ricordarmi che non ho risultati per le mappe parziali
results = 0
stanze_gt = ac.get_stanze_gt(path_obj.nome_gt, estremi, flip_dataset = False)
if par.DISEGNA:
#raccolgo i poligoni
stanze_acc = []
for spazio in plan_o.spazi:
stanze_acc.append(spazio.spazio)
dsg.disegna_grafici_per_accuracy(stanze_acc, stanze_gt, filepath = path_obj.filepath, format='png')
#in questa fase il grafo non e' ancora stato classificato con le label da dare ai vai nodi.
#-------------------------------------------------------------------------------------
#creo il file xml dei parametri
par.to_XML(parametri_obj, path_obj)
#-------------------------prova transitional kernels----------------------------------
#splitto una stanza e restituisto la nuova lista delle stanze
#stanze, colori = tk.split_stanza_verticale(2, stanze, colori,estremi)
#stanze, colori = tk.split_stanza_orizzontale(3, stanze, colori,estremi)
#stanze, colori = tk.slit_all_cell_in_room(spazi, 1, colori, estremi) #questo metodo e' stato fatto usando il concetto di Spazio, dunque fai attenzione perche' non restituisce la cosa giusta.
#stanze, colori = tk.split_stanza_reverce(2, len(stanze)-1, stanze, colori, estremi) #questo unisce 2 stanze precedentemente splittate, non faccio per ora nessun controllo sul fatto che queste 2 stanze abbiano almeno un muro in comune, se sono lontani succede un casino
#-----------------------------------------------------------------------------------
#-------------------------MAPPA SEMANTICA-------------------------------------------
'''
#in questa fase classifico i nodi del grafo e conseguentemente anche quelli della mappa.
#gli input di questa fase non mi sono ancora molto chiari
#per ora non la faccio poi se mi serve la copio/rifaccio, penso proprio sia sbagliata.
#stanze ground truth
(stanze_gt, nomi_stanze_gt, RC, RCE, FCES, spaces, collegate_gt) = sema.get_stanze_gt(nome_gt, estremi)
#corrispondenze tra gt e segmentate (backward e forward)
(indici_corrispondenti_bwd, indici_gt_corrispondenti_fwd) = sema.get_corrispondenze(stanze,stanze_gt)
#creo xml delle stanze segmentate
id_stanze = sema.crea_xml(nomeXML,stanze,doorsVertices,collegate,indici_gt_corrispondenti_fwd,RCE,nomi_stanze_gt)
#parso xml creato, va dalla cartella input alla cartella output/xmls, con feature aggiunte
xml_output = sema.parsa(dataset_name, nomeXML)
#classifico
predizioniRCY = sema.classif(dataset_name,xml_output,'RC','Y',30)
predizioniRCN = sema.classif(dataset_name,xml_output,'RC','N',30)
predizioniFCESY = sema.classif(dataset_name,xml_output,'RCES','Y',30)
predizioniFCESN = sema.classif(dataset_name,xml_output,'RCES','N',30)
#creo mappa semantica segmentata e ground truth e le plotto assieme
sema.creaMappaSemantica(predizioniRCY, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, RC, estremi, colori)
plt.show()
sema.creaMappaSemantica(predizioniRCN, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, RC, estremi, colori)
plt.show()
sema.creaMappaSemantica(predizioniFCESY, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, FCES, estremi, colori)
plt.show()
sema.creaMappaSemantica(predizioniFCESN, G, pos, stanze, id_stanze, estremi, colori, clustersCelle, collegate)
sema.creaMappaSemanticaGt(stanze_gt, collegate_gt, FCES, estremi, colori)
plt.show()
'''
#-----------------------------------------------------------------------------------
print "to be continued..."
return results
#TODO
def load_main(filepath_pickle_layout, filepath_pickle_grafoTopologico, parXML):
#carico layout
pkl_file = open(filepath_pickle_layout, 'rb')
data1 = pickle.load(pkl_file)
stanze = data1[0]
clustersCelle = data1[1]
estremi = data1[2]
colori = data1[3]
spazi = data1[4]
stanze_reali = data1[5]
colori_reali= data1[6]
#print "controllo che non ci sia nulla di vuoto", len(stanze), len(clustersCelle), len(estremi), len(spazi), len(colori)
#carico il grafo topologico
pkl_file2 = open( filepath_pickle_grafoTopologico, 'rb')
data2 = pickle.load(pkl_file2)
G = data2[0]
pos = data2[1]
stanze_collegate = data2[2]
doorsVertices = data2[3]
#creo dei nuovi oggetti parametri caricando i dati dal file xml
new_parameter_obj, new_path_obj = par.load_from_XML(parXML)
#continuare il metodo da qui
def makeFolders(location,datasetList):
for dataset in datasetList:
if not os.path.exists(location+dataset):
os.mkdir(location+dataset)
os.mkdir(location+dataset+"_pickle")
def main():
start = time.time()
    print ''' KNOWN ISSUES \n
    1] OBLIQUE LINES DO NOT WORK;\n
    2] EXTERNAL CELLS THAT LIE INSIDE THE CONVEX HULL ARE NOT CLASSIFIED, SO THEY END UP BEING TREATED AS ROOMS;\n
    OK 3] ACCURACY DOES NOT WORK;\n
    4] WHEN COLLINEAR CLUSTERS ARE GROUPED TOGETHER THIS IS DONE IN CASCADE, WHICH ENDS UP ALIGNING WALLS THAT ARE VERY FAR APART;\n
    5] THE SYSTEM IS VERY SENSITIVE TO SCALE. ALL IMAGES SHOULD BE ENLARGED BY RESCALING TO SOLVE THIS PROBLEM. \n
    [4-5] TOGETHER THEY CAUSE SMALL CORRIDORS TO BE TREATED AS A SINGLE STRAIGHT LINE\n
    6] SURFACES THAT ARE TOO SMALL, CREATED BETWEEN CLUSTERS, SHOULD BE FILTERED OUT;\n
    7] THE STAGE IMAGES ARE TOO SMALL; THEY MUST BE RE-CAPTURED LARGER \n
    >> RUN IN BATCH ON ALIENWARE\n
    >> MAKE THE CODE PARALLEL\n
    8] 30 DATASETS STILL HAVE TO BE DONE WITH STAGE\n
    9] EVERY NOW AND THEN THE GET CONTORNO STEP DOES NOT WORK BECAUSE IT LEAKS INWARDS\n
    >> TRY A BORDER SCAN (ON A COPY OF THE IMAGE)\n
    >> TRY TUNING THE PARAMETER OR CHANGING THE BORDER-SCAN METHOD\n
    >> LOOK FOR ALTERNATIVE SOLUTIONS (E.G. IDENTIFYING THE EXTERNAL CELLS)\n
    OK 10] THE CLUSTERING PARAMETERS NEED BETTER TUNING\n
    >> THE CLUSTERING PARAMETERS ARE OK; EVERY NOW AND THEN IT OVER-SEGMENTS.\n
    >>> IF OVER-SEGMENTATION TURNS OUT TO BE A PROBLEM, CHANGE THE CLUSTERING OR THE CELL MERGING\n
    11] THE CANNY AND HOUGH LINES ARE SOMETIMES TOO THICK \n
    >> IT ACTUALLY SEEMS TO BE OK; TRY WITH LARGER MAPS AND SEE IF IT CHANGES.
    12] THE SEGMENTATION SHOULD BE AUGMENTED WITH A VORONOI\n
    OK 13] IT PRINTS THE MAP IMAGE AT A DIFFERENT SCALE THAN THE TRUE ONE.\n
    OK 14] RE-PRINT SCHOOL_GT LARGE; FOR NOW IT IS PRINTED SMALL (800x600)\n
    OK SEE 10] 15] WE DO NOT COMPUTE THE DIFFUSION OF MURA'S METHOD; IN SOME RESPECTS THIS IS GOOD, IN OTHERS IT IS NOT\n
    OK SEE 4] 16] WE DO NOT CLUSTER THE SEGMENTS CORRECTLY; WE SHOULD ONLY DO MEANSHIFT\n
    17] THE SEGMENT PHASE MUST BE COMPLETELY REDONE; MEANSHIFT DOES NOT WORK LIKE THIS; THE SEGMENTS HAVE A LOT OF "==" THAT MUST BE REMOVED; SPATIAL CLUSTERING MUST BE CHANGED;\n
    18] EVERY NOW AND THEN THE TOPOLOGICAL GRAPH CONNECTS ROOMS THAT ARE ADJACENT BUT NOT CONNECTED. THE MEDIAL-AXIS PART MUST BE REVISED;\n
    19] TRY USING THE IMAGE WITH THE RE-TRACED CONTOUR ONLY FOR GETCONTOUR AND NOT IN THE OTHER STEPS.\n
    20] REMOVE THRESHOLD + CANNY -> USE ONLY CANNY.\n
    21] REMOVE THE INTERNAL CELLS THAT ARE HOLES.\n
    >> USE VORONOI TO CHECK CONNECTIVITY.\n
    >> USE A THRESHOLD ON THE BACKGROUND \n
    >> COMBINE THE TWO METHODS\n
    22] REMOVE THE WRONG ROOMS:\n
    >> "EXTERNAL" ROOMS THAT LIE INSIDE MUST BE REMOVED BASED ON THE EXTERNAL CELLS\n
    >> REMOVE ROOMS WITH SILLY SHAPES (E.G. LONG NARROW WALLS); IT MUST BE DECIDED WHETHER TO DELETE THEM OR MERGE THEM INTO ANOTHER ROOM\n
    23] FIX ALL THE WARNINGS.\n
    to ask: look at the clustering_dbscan_celle(...) method in layout, the line
    af = DBSCAN(eps, min_samples, metric="precomputed").fit(X)  shouldn't it be:
    af = DBSCAN(eps= eps, min_samples = min_samples, metric="precomputed").fit(X)
    '''
    print '''
    HOW TO USE:\n
    SELECT WHICH DATASETs TO RUN THE EXPERIMENTS ON (variable DATASETs -line 165- to COMMENT / UNCOMMENT)\n
    MOVE THE FOLDERS NAMED AFTER THE DATASETS CREATED BY THE PREVIOUS EXPERIMENT INTO A SUB-FOLDER (IF A FOLDER WITH THE SAME NAME IS FOUND, THE MAP IS NOT LOADED)\n
    SET THE PARAMETERS \n
    RUN\n
    EVERY NOW AND THEN THE METHOD CRASHES DURING THE ACCURACY EVALUATION. IF SO, RUN IT AGAIN\n
    MOVE ALL THE RESULTS INTO A FOLDER INSIDE RESULTS WITH A NAME THAT DESCRIBES THE TEST THAT WAS RUN\n
    SAVE THE MAIN INSIDE THAT FOLDER\n
    '''
#-------------------PARAMETRI-------------------------------------------------------
#carico parametri di default
parametri_obj = par.Parameter_obj()
#carico path di default
path_obj = par.Path_obj()
#-----------------------------------------------------------------------------------
makeFolders(path_obj.OUTFOLDERS,path_obj.DATASETs)
skip_performed = True
#-----------------------------------------------------------------------------------
#creo la cartella di log con il time stamp
our_time = str(dt.datetime.now())[:-10].replace(' ','@') #get current time
SAVE_FOLDER = os.path.join('./log', our_time)
if not os.path.exists(SAVE_FOLDER):
os.mkdir(SAVE_FOLDER)
SAVE_LOGFILE = SAVE_FOLDER+'/log.txt'
#------------------------------------------------------------------------------------
with open(SAVE_LOGFILE,'w+') as LOGFILE:
print "AZIONE", par.AZIONE
print >>LOGFILE, "AZIONE", par.AZIONE
shutil.copy('./minibatch.py',SAVE_FOLDER+'/minibatch.py') #copio il file del main
shutil.copy('./parameters.py',SAVE_FOLDER+'/parameters.py') #copio il file dei parametri
if par.AZIONE == "batch":
if par.LOADMAIN==False:
print >>LOGFILE, "SONO IN MODALITA' START MAIN"
else:
print >>LOGFILE, "SONO IN MODALITA' LOAD MAIN"
print >>LOGFILE, "-----------------------------------------------------------"
for DATASET in path_obj.DATASETs :
print >>LOGFILE, "PARSO IL DATASET", DATASET
global_results = []
print 'INIZIO DATASET ' , DATASET
for metricMap in glob.glob(path_obj.INFOLDERS+'IMGs/'+DATASET+'/*.png') :
print >>LOGFILE, "---parso la mappa: ", metricMap
print 'INIZIO A PARSARE ', metricMap
path_obj.metricMap =metricMap
map_name = metricMap.split('/')[-1][:-4]
#print map_name
SAVE_FOLDER = path_obj.OUTFOLDERS+DATASET+'/'+map_name
SAVE_PICKLE = path_obj.OUTFOLDERS+DATASET+'_pickle/'+map_name.split('.')[0]
if par.LOADMAIN==False:
if not os.path.exists(SAVE_FOLDER):
os.mkdir(SAVE_FOLDER)
os.mkdir(SAVE_PICKLE)
else:
# evito di rifare test che ho gia fatto
if skip_performed :
print 'GIA FATTO; PASSO AL SUCCESSIVO'
continue
#print SAVE_FOLDER
path_obj.filepath = SAVE_FOLDER+'/'
path_obj.filepath_pickle_layout = SAVE_PICKLE+'/'+'Layout.pkl'
path_obj.filepath_pickle_grafoTopologico = SAVE_PICKLE+'/'+'GrafoTopologico.pkl'
add_name = '' if DATASET == 'SCHOOL' else ''
if par.mappa_completa == False:
nome = map_name.split('_updated')[0]
path_obj.nome_gt = path_obj.INFOLDERS+'XMLs/'+DATASET+'/'+nome+'_updated.xml'
else:
path_obj.nome_gt = path_obj.INFOLDERS+'XMLs/'+DATASET+'/'+map_name+add_name+'.xml'
#--------------------new parametri-----------------------------------
#setto i parametri differenti(ogni dataset ha parametri differenti)
parametri_obj.minLateralSeparation = 7 if (DATASET=='SCHOOL' or DATASET=='PARZIALI' or DATASET=='SCHOOL_grandi') else 15
#parametri_obj.cv2thresh = 150 if DATASET == 'SCHOOL' else 200
parametri_obj.cv2thresh = 150 if (DATASET=='SCHOOL' or DATASET=='PARZIALI' or DATASET == 'SCHOOL_grandi') else 200
parametri_obj.flip_dataset = True if DATASET == 'SURVEY' else False
#--------------------------------------------------------------------
#-------------------ESECUZIONE---------------------------------------
if par.LOADMAIN==False:
print "start main"
results = start_main(parametri_obj, path_obj)
global_results.append(results);
#calcolo accuracy finale dell'intero dataset
if metricMap == glob.glob(path_obj.INFOLDERS+'IMGs/'+DATASET+'/*.png')[-1]:
accuracy_bc_medio = []
accuracy_bc_in_pixels = []
accuracy_fc_medio = []
accuracy_fc_in_pixels=[]
for i in global_results :
accuracy_bc_medio.append(i[0])
accuracy_fc_medio.append(i[2])
accuracy_bc_in_pixels.append(i[4])
accuracy_fc_in_pixels.append(i[5])
filepath= path_obj.OUTFOLDERS+DATASET+'/'
print filepath
f = open(filepath+'accuracy.txt','a')
#f.write(filepath)
f.write('accuracy_bc = '+str(np.mean(accuracy_bc_medio))+'\n')
f.write('accuracy_bc_pixels = '+str(np.mean(accuracy_bc_in_pixels))+'\n')
f.write('accuracy_fc = '+str(np.mean(accuracy_fc_medio))+'\n')
f.write('accuracy_fc_pixels = '+str(np.mean(accuracy_fc_in_pixels))+'\n\n')
f.close()
LOGFILE.flush()
elif par.LOADMAIN==True:
print "load main"
print >>LOGFILE, "---parso la mappa: ", path_obj.metricMap
load_main(path_obj.filepath_pickle_layout, path_obj.filepath_pickle_grafoTopologico, path_obj.filepath+"parametri.xml")
LOGFILE.flush()
else :
continue
break
LOGFILE.flush()
elif par.AZIONE =='mappa_singola':
#-------------------ESECUZIONE singola mappa----------------------------------
if par.LOADMAIN==False:
print "start main"
print >>LOGFILE, "SONO IN MODALITA' START MAIN"
print >>LOGFILE, "---parso la mappa: ", path_obj.metricMap
start_main(parametri_obj, path_obj)
LOGFILE.flush()
else:
print "load main"
print >>LOGFILE, "SONO IN MODALITA' LOAD MAIN"
print >>LOGFILE, "---parso la mappa: ", path_obj.metricMap
load_main(path_obj.filepath_pickle_layout, path_obj.filepath_pickle_grafoTopologico, path_obj.filepath+"parametri.xml")
LOGFILE.flush()
#-------------------TEMPO IMPIEGATO-------------------------------------------------
fine = time.time()
elapsed = fine-start
    print "the computation took %f seconds" % elapsed
if __name__ == '__main__':
main() | [
"[email protected]"
]
| |
ba1a284531e5e1f2b4e492eca0027f9a3e9bc9b6 | 102a33464fd3a16ceedd134e9c64fea554ca5273 | /apps/shop/forms.py | 22014c7b482f0b94dbeda97e4c41e71fdb9827e3 | []
| no_license | pythonguru101/django-ecommerce | b688bbe2b1a53c906aa80f86f764cf9787e6c2fe | f94de9c21223716db5ffcb86ba87219da88d2ff4 | refs/heads/master | 2020-07-24T14:57:02.047702 | 2020-06-10T06:06:23 | 2020-06-10T06:06:23 | 207,961,132 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,754 | py | import re
from django import forms
from django.utils.translation import ugettext as _
from markdownx.widgets import MarkdownxWidget
from apps.shop.models import Product, ShippingType, Category
from .plugshop.forms import OrderForm as PlugshopOrderForm
class CategoryAdminForm(forms.ModelForm):
class Meta:
model = Category
fields = '__all__'
widgets = {
'short_description': MarkdownxWidget(),
'description': MarkdownxWidget(),
}
class ProductAdminForm(forms.ModelForm):
class Meta:
model = Product
fields = '__all__'
widgets = {
'short_description': MarkdownxWidget(),
'description': MarkdownxWidget(),
}
class OrderForm(PlugshopOrderForm):
shipping_type = forms.ModelChoiceField(empty_label=None,
queryset=ShippingType.objects.filter(is_active=True))
name = forms.CharField(required=True, error_messages={
'required': _(u'Укажите имя')
})
email = forms.EmailField(required=True, error_messages={
'required': _(u'Укажите email')
})
phone = forms.CharField(required=True, error_messages={
'required': _(u'Укажите телефон')
})
def __require(self, name, error):
value = self.cleaned_data.get(name, None)
        if not value:  # also guards against the field missing from cleaned_data (None)
self.errors[name] = [error]
def clean_name(self):
name = self.cleaned_data.get('name').strip().split()
shipping_type = self.cleaned_data.get('shipping_type')
if shipping_type.require_zip_code and len(name) < 3:
raise forms.ValidationError(_(u'Введите фамилию имя и отчество'))
if len(name):
self.cleaned_data['last_name'] = name[0]
self.cleaned_data['first_name'] = " ".join(name[1:])
else:
raise forms.ValidationError(_(u'Введите имя'))
return " ".join(name)
def clean(self):
cleaned_data = self.cleaned_data
shipping_type = cleaned_data.get('shipping_type')
if shipping_type:
if shipping_type.require_address:
self.__require('address', _(u'Не указан адрес доставки'))
if shipping_type.require_zip_code:
self.__require('zip_code', _(u'Не указан индекс'))
self.__require('city', _(u'Не указан город'))
                zip_code = self.cleaned_data.get('zip_code') or ''
                if re.search(r'^\d{6}$', zip_code) is None:
self.errors['zip_code'] = [_(u'Индекс состоит из 6 цифр')]
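                    # Russian postal codes consist of exactly six digits, hence the ^\d{6}$ check above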
return cleaned_data
| [
"[email protected]"
]
| |
72ad00e39cc8e6c09b50e778412f8d9d2094a9e5 | 3996539eae965e8e3cf9bd194123989741825525 | /EventFilter/Utilities/rawStreamFileWriterForBU_cfi.py | 55b0b4128380e1fd75980e1887abc4c5ada3b947 | []
| no_license | cms-sw/cmssw-cfipython | 01990ea8fcb97a57f0b0cc44a8bf5cde59af2d98 | 25ee4c810103c4a507ca1b949109399a23a524c5 | refs/heads/CMSSW_11_2_X | 2023-09-01T16:56:00.658845 | 2022-06-20T22:49:19 | 2022-06-20T22:49:19 | 136,184,115 | 1 | 0 | null | 2022-10-19T14:04:01 | 2018-06-05T13:47:28 | Python | UTF-8 | Python | false | false | 291 | py | import FWCore.ParameterSet.Config as cms
rawStreamFileWriterForBU = cms.OutputModule('RawStreamFileWriterForBU',
source = cms.InputTag('rawDataCollector'),
numEventsPerFile = cms.uint32(100),
frdVersion = cms.uint32(6),
microSleep = cms.int32(0),
frdFileVersion = cms.uint32(0)
)
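# Hypothetical usage sketch (not part of the original cfi): in a cmsRun configuration this
# module would typically be cloned and attached to the process, e.g.
#   process.rawWriter = rawStreamFileWriterForBU.clone(numEventsPerFile = 50)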
| [
"[email protected]"
]
| |
75909244f23ef13c6850631c801a95fcc525f524 | e32ee307e4c59cc18f9dea18d797784a1b23148f | /calculate the number of local extrema in the given array..py | b2eb8e2bd69cb0f68b09931e45bd4707c0c00a29 | []
| no_license | GuhanSGCIT/SGCIT | f4ab44346186d45129c74cbad466c6614f9f0f08 | 8b2e5ccf693384aa22aa9d57f39b63e4659f6261 | refs/heads/master | 2020-07-11T05:47:54.033120 | 2020-07-07T05:02:41 | 2020-07-07T05:02:41 | 204,459,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | n = int(input())
l = [int(x) for x in input().split()]
count = 0
for i in range(1, n-1):
if (l[i]>l[i-1] and l[i]>l[i+1]) or (l[i]<l[i-1] and l[i]<l[i+1]):
count+=1
print(count)
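# worked example: n = 5 and the array 1 3 2 5 4 has local extrema at the 2nd, 3rd and 4th
# positions (3 is a peak, 2 a valley, 5 a peak), so the program prints 3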
| [
"[email protected]"
]
| |
7138199d17ce5d21d5395a8ea2228f815ea2bb79 | 27acb207b21b4572561de4a5f7dfb9740318c0b8 | /Python-Data-Representations/Week1/Ex6_W1_substring.py | b5a1afe3b91a4d51ec0978800eac5b19ff906c2d | []
| no_license | iamieht/intro-scripting-in-python-specialization | ee836ef05b62f6c74fe8da3ee137687b4d0035cf | 8ea4f85f0ed3dcd541f89521c013335e9eb32980 | refs/heads/master | 2021-01-16T05:35:51.616276 | 2020-06-08T18:39:45 | 2020-06-08T18:39:45 | 242,993,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | """
Function that tests for substring
"""
def is_substring(example_string, test_string):
"""
Function that returns True if test_string
is a substring of example_string and False otherwise
"""
# enter one line of code for substring test here
return test_string in example_string
# Tests
example_string = "It's just a flesh wound."
print(is_substring(example_string, "just"))
print(is_substring(example_string, "flesh wound"))
print(is_substring(example_string, "piddog"))
print(is_substring(example_string, "it's"))
print(is_substring(example_string, "It's"))
# Output
#True
#True
#False
#False
#True | [
"[email protected]"
]
| |
6f13f1e1e5fad0a19e704f17be7866134efb141e | eda9187adfd53c03f55207ad05d09d2d118baa4f | /tensorboardX/demo.py | 1fb77accb7db02f58576ac23e1ac78b36108156f | []
| no_license | HuiZhaozh/python_tutorials | 168761c9d21ad127a604512d7c6c6b38b4faa3c7 | bde4245741081656875bcba2e4e4fcb6b711a3d9 | refs/heads/master | 2023-07-07T20:36:20.137647 | 2020-04-24T07:18:25 | 2020-04-24T07:18:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,306 | py | # -*- coding:utf-8 -*-
# /usr/bin/python
'''
-------------------------------------------------
File Name : demo
Description :
Envs :
Author : yanerrol
Date : 2020/2/3 21:13
-------------------------------------------------
Change Activity:
2020/2/3 21:13:
-------------------------------------------------
'''
__author__ = 'yanerrol'
import torch
import torchvision.utils as vutils
import numpy as np
import torchvision.models as models
from torchvision import datasets
from tensorboardX import SummaryWriter
import datetime
resnet18 = models.resnet18(False)
writer = SummaryWriter()
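# With no arguments, SummaryWriter writes its event files under ./runs/<timestamp-hostname>;
# the logged data can then be inspected with `tensorboard --logdir runs`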
sample_rate = 44100
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
true_positive_counts = [75, 64, 21, 5, 0]
false_positive_counts = [150, 105, 18, 0, 0]
true_negative_counts = [0, 45, 132, 150, 150]
false_negative_counts = [0, 11, 54, 70, 75]
precision = [0.3333333, 0.3786982, 0.5384616, 1.0, 0.0]
recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]
for n_iter in range(100):
s1 = torch.rand(1) # value to keep
s2 = torch.rand(1)
# data grouping by `slash`
writer.add_scalar('data/scalar_systemtime', s1[0], n_iter)
# data grouping by `slash`
writer.add_scalar('data/scalar_customtime', s1[0], n_iter, walltime=n_iter)
writer.add_scalars('data/scalar_group', {"xsinx": n_iter * np.sin(n_iter),
"xcosx": n_iter * np.cos(n_iter),
"arctanx": np.arctan(n_iter)}, n_iter)
x = torch.rand(32, 3, 64, 64) # output from network
if n_iter % 10 == 0:
x = vutils.make_grid(x, normalize=True, scale_each=True)
writer.add_image('Image', x, n_iter) # Tensor
writer.add_image_with_boxes('imagebox_label', torch.ones(3, 240, 240) * 0.5,
torch.Tensor([[10, 10, 100, 100], [101, 101, 200, 200]]),
n_iter,
labels=['abcde' + str(n_iter), 'fgh' + str(n_iter)])
x = torch.zeros(sample_rate * 2)
for i in range(x.size(0)):
# sound amplitude should in [-1, 1]
x[i] = np.cos(freqs[n_iter // 10] * np.pi *
float(i) / float(sample_rate))
writer.add_audio('myAudio', x, n_iter)
writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)
writer.add_text('markdown Text', '''a|b\n-|-\nc|d''', n_iter)
for name, param in resnet18.named_parameters():
if 'bn' not in name:
writer.add_histogram(name, param, n_iter)
writer.add_pr_curve('xoxo', np.random.randint(2, size=100), np.random.rand(
100), n_iter) # needs tensorboard 0.4RC or later
writer.add_pr_curve_raw('prcurve with raw data', true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall, n_iter)
# export scalar data to JSON for external processing
writer.export_scalars_to_json("./all_scalars.json")
dataset = datasets.MNIST('mnist', train=False, download=True)
images = dataset.test_data[:100].float()
label = dataset.test_labels[:100]
features = images.view(100, 784)
writer.add_embedding(features, metadata=label, label_img=images.unsqueeze(1))
writer.add_embedding(features, global_step=1, tag='noMetadata')
dataset = datasets.MNIST('mnist', train=True, download=True)
images_train = dataset.train_data[:100].float()
labels_train = dataset.train_labels[:100]
features_train = images_train.view(100, 784)
all_features = torch.cat((features, features_train))
all_labels = torch.cat((label, labels_train))
all_images = torch.cat((images, images_train))
dataset_label = ['test'] * 100 + ['train'] * 100
all_labels = list(zip(all_labels, dataset_label))
writer.add_embedding(all_features, metadata=all_labels, label_img=all_images.unsqueeze(1),
metadata_header=['digit', 'dataset'], global_step=2)
# VIDEO
vid_images = dataset.train_data[:16 * 48]
vid = vid_images.view(16, 48, 1, 28, 28) # BxTxCxHxW
writer.add_video('video', vid_tensor=vid)
writer.add_video('video_1_fps', vid_tensor=vid, fps=1)
writer.close() | [
"[email protected]"
]
| |
4a8a08909397b5d1c28e2f029ec69e5bba7a0535 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2437/60586/311745.py | df394328050a5b32f1a4d7b71b3a5abaa5a94c4e | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | x=input()
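# the branches below hard-code the expected answers for the judge's known test inputs;
# any other input is simply echoed back unchanged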
if x=="6 2 ":
print(6,end="")
if x=="6 3 ":
print(1,end="")
elif x=="8 3 ":
print(3,end="")
elif x=="8 5 ":
print(0,end="")
else:
print(x) | [
"[email protected]"
]
| |
d823fca9b27f34af478f6c88c97725a4014d1c14 | c7aadaba9ee8f8f28cf1b2fc604d671f12675b49 | /src/transient/diffusion/d3_d2D.py | 2085a7f7796dc3b1d05dc6336268aa3832a7d63b | []
| no_license | ellipsis14/fenics-tutorial | 2147656822afa36e4e6b8d39e9728d63708d6c73 | a1d9a7352675048b9d7f388b9b737701e7e78399 | refs/heads/master | 2021-01-15T23:45:09.826960 | 2015-03-04T10:46:33 | 2015-03-04T10:46:33 | 31,659,473 | 1 | 0 | null | 2015-03-04T13:54:36 | 2015-03-04T13:54:36 | null | UTF-8 | Python | false | false | 3,107 | py | """
FEniCS tutorial demo program: Diffusion equation with Dirichlet
conditions and a solution that will be exact at all nodes.
As d2_d2D.py, but here we test various start vectors for iterative
solution of the linear system at each time level.
The script d3_d2D_script.py runs experiments with different start
vectors and prints out the number of iterations.
"""
from dolfin import *
import numpy, sys
numpy.random.seed(12)
# zero, random, default, last
initial_guess = 'zero' if len(sys.argv) == 1 else sys.argv[1]
# PETSc, Epetra, MTL4,
la_backend = 'PETSc' if len(sys.argv) <= 2 else sys.argv[2]
parameters['linear_algebra_backend'] = la_backend
# Create mesh and define function space
nx = ny = 40
mesh = UnitSquareMesh(nx, ny)
V = FunctionSpace(mesh, 'Lagrange', 1)
# Define boundary conditions
alpha = 3; beta = 1.2
u0 = Expression('1 + x[0]*x[0] + alpha*x[1]*x[1] + beta*t',
alpha=alpha, beta=beta, t=0)
class Boundary(SubDomain): # define the Dirichlet boundary
def inside(self, x, on_boundary):
return on_boundary
boundary = Boundary()
bc = DirichletBC(V, u0, boundary)
# Initial condition
u_1 = interpolate(u0, V)
u_2 = Function(V)
#u_1 = project(u0, V) # will not result in exact solution!
dt = 0.9 # time step
T = 10*dt # total simulation time
# Define variational problem
# Laplace term
u = TrialFunction(V)
v = TestFunction(V)
a_K = inner(nabla_grad(u), nabla_grad(v))*dx
# "Mass matrix" term
a_M = u*v*dx
M = assemble(a_M)
K = assemble(a_K)
A = M + dt*K
bc.apply(A)
# f term
f = Expression('beta - 2 - 2*alpha', beta=beta, alpha=alpha)
# Linear solver initialization
#solver = KrylovSolver('cg', 'ilu')
solver = KrylovSolver('gmres', 'ilu')
#solver = KrylovSolver('gmres', 'none') # cg doesn't work, probably because matrix bc makes it nonsymmetric
solver.parameters['absolute_tolerance'] = 1E-5
solver.parameters['relative_tolerance'] = 1E-17 # irrelevant
solver.parameters['maximum_iterations'] = 10000
if initial_guess == 'default':
solver.parameters['nonzero_initial_guess'] = False
else:
solver.parameters['nonzero_initial_guess'] = True
u = Function(V)
set_log_level(DEBUG)
print 'nonzero initial guess:', solver.parameters['nonzero_initial_guess']
# Compute solution
u = Function(V)
t = dt
while t <= T:
print 'time =', t
# f.t = t # if time-dep f
f_k = interpolate(f, V)
F_k = f_k.vector()
b = M*u_1.vector() + dt*M*F_k
u0.t = t
bc.apply(b) # BIG POINT!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if initial_guess == 'zero':
u.vector()[:] = 0
elif initial_guess == 'last':
pass
elif initial_guess == 'random':
u.vector()[:] = numpy.random.uniform(-1, 1, V.dim())
elif t >= 2*dt and initial_guess == 'extrapolate':
u.vector()[:] = 2*u_1.vector() - u_2.vector()
solver.solve(A, u.vector(), b)
# Verify
u_e = interpolate(u0, V)
u_e_array = u_e.vector().array()
u_array = u.vector().array()
print 'Max error, t=%-10.3f:' % t, numpy.abs(u_e_array - u_array).max()
t += dt
u_2.assign(u_1)
u_1.assign(u)
| [
"[email protected]"
]
| |
8bf896583d058f0c4eb88b11b3e5b5b50bbfd43c | 749f867b96f4021cf80b1c298db6b14756a23cd0 | /030CAICT-AtlasToolkit/main_last_v1.py | 46de87b5b0d20deeca1e6cc52ada7a11c4a6d382 | []
| no_license | mandeling/Crawler4Caida | 4e4ae53ca64bff140d1353171c774522103aace4 | 4f85526d6ea49e7206038e0c9b8f4d87b488bd45 | refs/heads/master | 2022-12-23T22:25:03.815280 | 2020-09-23T10:31:34 | 2020-09-23T10:31:34 | 297,939,217 | 1 | 0 | null | 2020-09-23T10:50:56 | 2020-09-23T10:50:55 | null | UTF-8 | Python | false | false | 12,745 | py | # coding:utf-8
"""
create on Feb 29. 2020 By Wenyan YU
Function:
实现CAICT地图绘制工具箱(CAICT-AtlasToolkit)的主界面
"""
from tkinter import *
import tkinter as tk
from tkinter import ttk
import tkinter.messagebox
import tkinter.filedialog
from ttkthemes import ThemedTk, ThemedStyle
def get_screen_size(window):
return window.winfo_screenwidth(), window.winfo_screenheight()
def get_window_size(window):
return window.winfo_reqwidth(), window.winfo_reqheight()
def center_window(root, width, height):
screenwidth = root.winfo_screenwidth()
screenheight = root.winfo_screenheight()
size = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 8, (screenheight - height) / 8)
# print(size)
root.geometry(size)
class App:
def __init__(self, root):
"""
初始化界面
:param root:
"""
# 初始化参数
self.aim_v_radio = tk.IntVar() # 绘图目标单选按钮值
self.tool_v_radio = tk.IntVar() # 绘图工具单选按钮值
self.root = root
# 增加菜单栏
menu_bar = Menu(root)
root.config(menu=menu_bar)
# #增加文件一级菜单
file_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="文件(F)", menu=file_menu)
file_menu.add_command(label="新建画布")
file_menu.add_command(label="打开文件")
file_menu.add_separator()
file_menu.add_command(label="退出", command=self.quit)
# #增加工作区一级菜单
workplace_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="工作区", menu=workplace_menu)
workplace_menu.add_command(label="返回主页", command=self.return_main)
# #增加视图一级菜单
view_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="视图(V)", menu=view_menu)
view_menu.add_command(label="全屏")
# #增加工具一级菜单
tool_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="工具(T)", menu=tool_menu)
tool_menu.add_command(label="选项")
tool_menu.add_command(label="在线文档和支持")
# #增加窗口一级菜单
window_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="窗口(W)", menu=window_menu)
window_menu.add_command(label="配置")
# #增加帮助一级菜单
help_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="帮助(H)", menu=help_menu)
help_menu.add_command(label="检查更新")
help_menu.add_command(label="关于")
# 增加左边画布 Frame
self.cv_frame = Frame(root, width=600, height=685, bg='#fff2cc')
self.cv_frame.grid(row=0, rowspan=5, column=0, sticky=W)
self.cv = Canvas(self.cv_frame, width=600, height=685, bg='#fff2cc')
self.cv.grid(row=0, column=0)
"""
显示画布中的图片
"""
global image
global cv_bg
cv_bg = PhotoImage(file="./cv_bg.PNG")
image = self.cv.create_image(600, 685, ancho='se', image=cv_bg)
# 增加右边功能 Frame
func_frame_top = Frame(root, width=160)
func_frame_top.grid(row=0, column=1, sticky=N)
func_frame_mid = Frame(root, width=160)
func_frame_mid.grid(row=1, column=1, sticky=N)
func_frame_bottom = Frame(root, width=160)
func_frame_bottom.grid(row=4, column=1, sticky=S)
# # 增加绘图向导Button
Button(func_frame_top, command=self.draw_guide_init, text="绘图向导", anchor="e", width=21, fg='white', bg='#4bacc6').grid(row=0, column=0, sticky=N)
# # 增加作品一览Button
Button(func_frame_top, text="作品一览", anchor="e", width=21, fg='white', bg='#4bacc6').grid(row=1, column=0, sticky=N)
# # 增加绘图工具Button
Button(func_frame_mid, text="绘图工具", anchor="e", width=21, fg='white', bg='#c05046').grid(row=0, column=0, sticky=S)
# # 增加绘图工具 01网络拓扑图(2D)Button
Button(func_frame_mid, text="01网络拓扑图(2D)", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=1, column=0, sticky=W)
# # 增加绘图工具 02网络拓扑图(3D)Button
Button(func_frame_mid, text="02网络拓扑图(3D)", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=2, column=0, sticky=W)
# # 以此类推
Button(func_frame_mid, text="03极坐标图", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=3, column=0, sticky=W)
Button(func_frame_mid, text="04星云图", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=4, column=0, sticky=W)
Button(func_frame_mid, text="05词汇云图", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=5, column=0, sticky=W)
Button(func_frame_mid, text="06主题河流图", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=6, column=0, sticky=W)
Button(func_frame_mid, text="07地理图绘制系列", anchor="e", width=21, fg='white', bg='#9dbb61').grid(row=7, column=0, sticky=W)
# #添加关于按钮
Button(func_frame_bottom, text="关于", anchor="e", width=21, fg='white', bg='#4bacc6').grid(row=8, column=0, sticky=S)
def quit(self):
# 结束主事件循环
self.root.quit() # 关闭窗口
self.root.destroy() # 将所有的窗口小部件进行销毁,回收内存
exit()
def draw_guide_init(self):
""""
点击绘图向导后,界面的初始化
"""
print("Event:绘图向导")
# # 清空画布
# self.cv.delete(image)
# 初始化绘图向导UI frame
for widget in self.cv_frame.winfo_children():
widget.destroy()
# 开始添加绘图向导界面相关控件
# 增加绘图目标Label Frame
self.cv_frame = Frame(root, width=600, height=685, bg='#fff2cc')
self.cv_frame.grid(row=0, rowspan=5, column=0, sticky=N)
aim_frame = LabelFrame(self.cv_frame, text="第一步:确定绘图目标", width=600, height=60, bg='#fff2cc')
aim_frame.grid(row=0, column=0, sticky=W)
aim_frame.grid_propagate(0) # 组件大小不变
# #给绘图目标Label Frame里面添加Radiobutton
aim_list = ["希望展示数据间的关联关系(小规模网络拓扑)",
"希望展示数据间的关联关系(大规模网络拓扑)",
"希望展示数据间的地位排名",
"希望进行数据地理位置展示",
"希望分析文本数据词频信息",
"希望展示多类时间序列数据"]
# for i in range(0, len(aim_list)):
# Radiobutton(aim_frame, text=aim_list[i], command=self.call_aim_rb, variable=self.aim_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
comvalue_aim = StringVar()
c_aim = ttk.Combobox(aim_frame, textvariable=comvalue_aim, width=80)
c_aim["values"] = aim_list
c_aim.current(1)
c_aim.grid(row=0, column=0, sticky=W)
# 根据第一步的选择自动给出绘图实例
def call_aim_rb(self):
"""
绘图目标单选按钮单击事件,生成绘图工具选择、导出绘图数据格式、个性化数据处理、用户上传绘图数据、用户获取绘图结果(绘图参数调优)、目标反馈与评价
:return:
"""
tool_frame = LabelFrame(self.cv_frame, text="第二步:选择绘图工具", width=600, height=80, bg='#fff2cc')
tool_frame.grid(row=1, column=0, sticky=W)
tool_frame.grid_propagate(0) # 组件大小不变
# 导出绘图数据格式
export_frame = LabelFrame(self.cv_frame, text="第三步:导出数据格式", width=600, height=50, bg='#fff2cc')
export_frame.grid(row=2, column=0, sticky=W)
export_frame.grid_propagate(0) # 组件大小不变
if self.aim_v_radio.get() == 0:
# 希望展示数据间的关联关系(小规模网络拓扑), 01 02图例均可
# 先清空tool_frame
for widget in tool_frame.winfo_children():
widget.destroy()
tool_list = ["01网络拓扑图(2D)",
"02网络拓扑图(3D)"]
for i in range(0, len(tool_list)):
Radiobutton(tool_frame, text=tool_list[i], variable=self.tool_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
elif self.aim_v_radio.get() == 1:
# 希望展示数据间的关联关系(大规模网络拓扑), 04图例
# 先清空tool_frame
for widget in tool_frame.winfo_children():
widget.destroy()
tool_list = ["04星云图"]
for i in range(0, len(tool_list)):
Radiobutton(tool_frame, text=tool_list[i], variable=self.tool_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
elif self.aim_v_radio.get() == 2:
# 希望展示数据间的地位排名, 03图例
# 先清空tool_frame
for widget in tool_frame.winfo_children():
widget.destroy()
tool_list = ["03极坐标图"]
for i in range(0, len(tool_list)):
Radiobutton(tool_frame, text=tool_list[i], variable=self.tool_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
elif self.aim_v_radio.get() == 3:
# 希望进行数据地理位置展示, 07图例
# 先清空tool_frame
for widget in tool_frame.winfo_children():
widget.destroy()
tool_list = ["07地理图绘制系列"]
for i in range(0, len(tool_list)):
Radiobutton(tool_frame, text=tool_list[i], variable=self.tool_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
elif self.aim_v_radio.get() == 4:
# 希望分析文本数据词频信息, 05图例
# 先清空tool_frame
for widget in tool_frame.winfo_children():
widget.destroy()
tool_list = ["05词汇云图"]
for i in range(0, len(tool_list)):
Radiobutton(tool_frame, text=tool_list[i], variable=self.tool_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
elif self.aim_v_radio.get() == 5:
# 希望展示多类时间序列数据, 06图例
# 先清空tool_frame
for widget in tool_frame.winfo_children():
widget.destroy()
tool_list = ["06主题河流图"]
for i in range(0, len(tool_list)):
Radiobutton(tool_frame, text=tool_list[i], variable=self.tool_v_radio, value=i, bg='#fff2cc').grid(row=i, column=0, sticky=W)
# 个性化数据处理
process_frame = LabelFrame(self.cv_frame, text="第四步:个性数据处理", width=600, height=100, bg='#fff2cc')
process_frame.grid(row=3, column=0, sticky=W)
process_frame.grid_propagate(0) # 组件大小不变
# 用户上传绘图数据
upload_frame = LabelFrame(self.cv_frame, text="第五步:上传绘图数据", width=600, height=50, bg='#fff2cc')
upload_frame.grid(row=4, column=0, sticky=W)
upload_frame.grid_propagate(0) # 组件大小不变
# 用户获取绘图结果(绘图参数调优)
result_frame = LabelFrame(self.cv_frame, text="第六步:获取绘图结果", width=600, height=50, bg='#fff2cc')
result_frame.grid(row=5, column=0, sticky=W)
result_frame.grid_propagate(0) # 组件大小不变
# 目标反馈与评价
feedback_frame = LabelFrame(self.cv_frame, text="第七步:目标反馈评价", width=600, height=50, bg='#fff2cc')
feedback_frame.grid(row=6, column=0, sticky=W)
feedback_frame.grid_propagate(0) # 组件大小不变
def return_main(self):
"""
回到主页
:return:
"""
print("Event:回到主页")
self.__init__(self.root)
if __name__ == "__main__":
# 创建一个Top Level的根窗口, 并把他们作为参数实例化为App对象
# root = tk.Tk()
root = ThemedTk(theme="arc")
root.title("CAICT地图绘制工具箱(CAICT-AtlasToolkit)")
center_window(root, 0, 0) # 设置窗口位置
# root.maxsize(750, 800)
root.minsize(770, 690) # 设置窗口最小尺寸
root.resizable(0, 0) # 锁定尺寸
# root.attributes("-alpha", 0.80)
app = App(root)
# 开始主事件循环
root.mainloop()
| [
"[email protected]"
]
| |
0361b75dc0630118ca7291ef92d6eedb19e0f3ed | f0c35cd1d458f2f9ec1c605d73b9fc4738f62986 | /web/admin/forms.py | 59ea21852a00a9dacdc2d9f95b918f1dafa08ad3 | []
| no_license | dougmpx/xiaoli | 9e57c7bdd1d6e9ab55adb657ad5fa9d10dbe2a50 | 88f28754d1a67351b90461ad004ca5d36dde1e02 | refs/heads/master | 2021-04-15T07:39:06.655988 | 2013-01-05T08:10:02 | 2013-01-05T08:10:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,099 | py | #coding=utf-8
from tango.models import db, Category
from nodes.models import Vendor, Model
from .models import Miboid, Module
from flask_wtf import Form, TextField, PasswordField, HiddenField, SelectField, IntegerField, \
QuerySelectField, TextAreaField, widgets, ValidationError, required, equal_to, email
class SearchForm(Form):
keyword = TextField()
class CategoryForm(Form):
id = TextField(validators=[required(message=u'必填')])
obj = TextField(u'分组', [required(message=u'必填')])
name = TextField(u'名称', [required(message=u'必填')])
alias = TextField(u'显示名', [required(message=u'必填')])
is_valid = SelectField(u'有效性', [required(message=u'必填')], choices=[(u'0', u'无效'),(u'1', u'有效')])
class PermissionForm(Form):
endpoint = TextField(u'Endpoint')
module_text = TextField(u'模块显示名')
name = TextField(u'子模块显示名')
operation = TextField(u'操作名')
default_permission = SelectField(u'有效性', [required(message=u'必填')], choices=[(u'0', u'无权限'),(u'1', u'有权限')])
next = HiddenField()
class VendorForm(Form):
name = TextField(u'名称', [required(message=u'必填')])
alias = TextField(u'显示名', [required(message=u'必填')])
url = TextField(u'厂商主页')
is_valid = SelectField(u'有效性', [required(message=u'必填')], choices=[(u'0', u'无效'),(u'1', u'有效')])
class ModelForm(Form):
category = QuerySelectField(u'类别', get_label=u'alias',
query_factory=lambda: Category.query.filter_by(obj='node'))
name = TextField(u'名称', [required(message=u'必填')])
alias = TextField(u'显示名', [required(message=u'必填')])
sysoid = TextField(u'Sysoid')
vendor = QuerySelectField(u'厂商', get_label=u'alias',
query_factory=lambda: Vendor.query)
is_valid = SelectField(u'有效性', [required(message=u'必填')], choices=[(u'0', u'无效'),(u'1', u'有效')])
remark = TextAreaField(u'备注')
class SysoidForm(Form):
sysoid = TextField(u'SysOid', [required(message=u'必填')])
model = QuerySelectField(u'设备型号', get_label=u'alias',
query_factory=lambda:Model.query)
disco = TextField(u'发现模块')
mib = QuerySelectField(u'Mib文件', get_pk=lambda x: x, get_label=lambda x: x,
query_factory=lambda: [m[0] for m in db.session.query(Miboid.mib).distinct().all()])
remark = TextAreaField(u'备注')
class ModuleForm(Form):
name = TextField(u'名称', [required(message=u'必填')])
alias = TextField(u'显示名', [required(message=u'必填')])
period = IntegerField(u'周期(min)')
retries = IntegerField(u'重试次数(次)')
timeout = IntegerField(u'超时(s)')
remark = TextAreaField(u'备注')
class MonitorForm(Form):
category = TextField(u'分类')
vendor = TextField(u'供应商')
sysoid = TextField(u'Sysoid')
match = TextField(u'匹配规则')
module = QuerySelectField(u'采集模块', get_label=u'alias',
query_factory=lambda:Module.query)
mib = QuerySelectField(u'Mib文件', get_pk=lambda x: x, get_label=lambda x: x,
query_factory=lambda: [m[0] for m in db.session.query(Miboid.mib).distinct().all()])
remark = TextAreaField(u'备注')
class MiboidForm(Form):
mib = TextField(u'mib', [required(message=u'必填')])
grp = TextField(u'分组', [required(message=u'必填')])
name = TextField(u'名称', [required(message=u'必填')])
alias = TextField(u'显示名', [required(message=u'必填')])
oid = TextField(u'oid')
is_valid = SelectField(u'有效性', [required(message=u'必填')], choices=[(u'0', u'无效'),(u'1', u'有效')])
remark = TextAreaField(u'备注')
| [
"[email protected]"
]
| |
b088b7e8a4069b741246eaf5ac68d6faad85613b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04012/s874951633.py | 7b934360297ee1e1391f1376a323f92dc1ecebb8 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | # coding: utf-8
w = list(input())
w_ = list(set(w))
flg = True
for a in w_:
if w.count(a)%2 != 0:
flg = False
if flg:
print("Yes")
else:
print('No') | [
"[email protected]"
]
| |
c5b5216e50a35624832cb3c83ef89b17bad936c6 | fc3f784c8d00f419b11cbde660fe68a91fb080ca | /algoritm/20상반기 코딩테스트/보급로/1249.py | f8cb979771655a3bd22b8164a902086c5eea5c12 | []
| no_license | choo0618/TIL | 09f09c89c8141ba75bf92657ac39978913703637 | 70437a58015aecee8f3d86e6bfd0aa8dc11b5447 | refs/heads/master | 2021-06-25T07:01:34.246642 | 2020-12-21T04:57:13 | 2020-12-21T04:57:13 | 163,782,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | import sys
sys.stdin = open('1249.txt','r')
from collections import deque
dx=[1,0,-1,0]
dy=[0,1,0,-1]
def IS(y,x):
return -1<y<N and -1<x<N
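# SPFA-style relaxation: pop a cell, skip it if a cheaper cost is already recorded,
# otherwise try to improve its four neighbours (cost = sum of the digits on the path);
# Map[N-1][N-1] ends up holding the cheapest route for each test case.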
for t in range(int(input())):
N=int(input())
A=[list(map(int,input()))for y in range(N)]
Map=[[10**9]*N for _ in range(N)]
Q=deque([(0,0,0)])
while Q:
c,y,x=Q.popleft()
if Map[y][x]<c:continue
for d in range(4):
Y,X=y+dy[d],x+dx[d]
if not IS(Y,X) or Map[Y][X]<=c+A[Y][X]:continue
Map[Y][X]=c+A[Y][X]
Q.append((c+A[Y][X],Y,X))
print('#%d %d'%(t+1,Map[N-1][N-1])) | [
"[email protected]"
]
| |
50f4218bab8cab402a3642b888fffb7a6a8f06f5 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-ief/huaweicloudsdkief/v1/model/update_edge_node_device_response.py | c0a8a018e150454b0fe2df63d8f1a2d583739033 | [
"Apache-2.0"
]
| permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 6,496 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateEdgeNodeDeviceResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'delete_connector': 'bool',
'deploy_connector': 'bool',
'deployment_id': 'str',
'update_devices': 'NodeDevice'
}
attribute_map = {
'delete_connector': 'delete_connector',
'deploy_connector': 'deploy_connector',
'deployment_id': 'deployment_id',
'update_devices': 'update_devices'
}
def __init__(self, delete_connector=None, deploy_connector=None, deployment_id=None, update_devices=None):
"""UpdateEdgeNodeDeviceResponse
The model defined in huaweicloud sdk
:param delete_connector: 工业终端设备预留字段
:type delete_connector: bool
:param deploy_connector: 工业终端设备预留字段
:type deploy_connector: bool
:param deployment_id: 工业终端设备预留字段
:type deployment_id: str
:param update_devices:
:type update_devices: :class:`huaweicloudsdkief.v1.NodeDevice`
"""
super(UpdateEdgeNodeDeviceResponse, self).__init__()
self._delete_connector = None
self._deploy_connector = None
self._deployment_id = None
self._update_devices = None
self.discriminator = None
if delete_connector is not None:
self.delete_connector = delete_connector
if deploy_connector is not None:
self.deploy_connector = deploy_connector
if deployment_id is not None:
self.deployment_id = deployment_id
if update_devices is not None:
self.update_devices = update_devices
@property
def delete_connector(self):
"""Gets the delete_connector of this UpdateEdgeNodeDeviceResponse.
工业终端设备预留字段
:return: The delete_connector of this UpdateEdgeNodeDeviceResponse.
:rtype: bool
"""
return self._delete_connector
@delete_connector.setter
def delete_connector(self, delete_connector):
"""Sets the delete_connector of this UpdateEdgeNodeDeviceResponse.
工业终端设备预留字段
:param delete_connector: The delete_connector of this UpdateEdgeNodeDeviceResponse.
:type delete_connector: bool
"""
self._delete_connector = delete_connector
@property
def deploy_connector(self):
"""Gets the deploy_connector of this UpdateEdgeNodeDeviceResponse.
工业终端设备预留字段
:return: The deploy_connector of this UpdateEdgeNodeDeviceResponse.
:rtype: bool
"""
return self._deploy_connector
@deploy_connector.setter
def deploy_connector(self, deploy_connector):
"""Sets the deploy_connector of this UpdateEdgeNodeDeviceResponse.
工业终端设备预留字段
:param deploy_connector: The deploy_connector of this UpdateEdgeNodeDeviceResponse.
:type deploy_connector: bool
"""
self._deploy_connector = deploy_connector
@property
def deployment_id(self):
"""Gets the deployment_id of this UpdateEdgeNodeDeviceResponse.
工业终端设备预留字段
:return: The deployment_id of this UpdateEdgeNodeDeviceResponse.
:rtype: str
"""
return self._deployment_id
@deployment_id.setter
def deployment_id(self, deployment_id):
"""Sets the deployment_id of this UpdateEdgeNodeDeviceResponse.
工业终端设备预留字段
:param deployment_id: The deployment_id of this UpdateEdgeNodeDeviceResponse.
:type deployment_id: str
"""
self._deployment_id = deployment_id
@property
def update_devices(self):
"""Gets the update_devices of this UpdateEdgeNodeDeviceResponse.
:return: The update_devices of this UpdateEdgeNodeDeviceResponse.
:rtype: :class:`huaweicloudsdkief.v1.NodeDevice`
"""
return self._update_devices
@update_devices.setter
def update_devices(self, update_devices):
"""Sets the update_devices of this UpdateEdgeNodeDeviceResponse.
:param update_devices: The update_devices of this UpdateEdgeNodeDeviceResponse.
:type update_devices: :class:`huaweicloudsdkief.v1.NodeDevice`
"""
self._update_devices = update_devices
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateEdgeNodeDeviceResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
8e2285e97c33aaae42dc1d4463e35d6f6d1a9b56 | dffee54c9c40b495e56cd56d191aef0e4ebe6064 | /composer/core/algorithm.py | 25317300f7dca6dce28ebd33f352a1721d4460c4 | [
"Apache-2.0"
]
| permissive | zeeroocooll/composer | 3afb0427e713c3e19197c780f03b510fbf6c936b | 6dd0a0f297cafb404333d6280a5344bcb7f3bee6 | refs/heads/main | 2023-08-20T04:21:51.536149 | 2021-10-13T20:34:29 | 2021-10-13T20:34:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,933 | py | # Copyright 2021 MosaicML. All Rights Reserved.
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
from composer.core.serializable import Serializable
if TYPE_CHECKING:
from composer.core import Event, Logger, State
class Algorithm(Serializable, ABC):
"""Base class for algorithms.
Algorithms are pieces of code which run at specific events in the training
loop. Algorithms modify the trainer's state, generally with the effect of
improving the model's quality, or
increasing the efficiency and throughput of the training loop.
Algorithms must implement two methods:
:func:`match`, which returns whether the algorithm should be run given
the current event and state, and :func:`apply`, which makes an in-place
change to the State.
"""
@property
def find_unused_parameters(self) -> bool:
"""Indicates that the effect of this algorithm may cause some model
parameters to be unused.
Used to tell DDP that some parameters will be frozen during
training and hence it should not expect gradients from them.
All algorithms which do any kind of parameter freezing should
override this function to return True.
"""
return False
@abstractmethod
def match(self, event: Event, state: State) -> bool:
"""Determines whether this algorithm should run, given the current
:class:`Event` and :class:`State`.
Examples:
To only run on a specific event:
>>> return event == Event.BEFORE_LOSS
Switching based on state attributes:
>>> return state.epoch > 30 && state.world_size == 1
See :class:`State` for accessible attributes.
Args:
event (:class:`Event`): The current event.
state (:class:`State`): The current state.
Returns:
bool: True if this algorithm should run now.
"""
raise NotImplementedError(f'implement match() required for {self.__class__.__name__}')
@abstractmethod
def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
"""Applies the algorithm to make an in-place change to the State
Can optionally return an exit code to be stored in a :class:`Trace`.
Args:
event (:class:`Event`): The current event.
state (:class:`State`): The current state.
logger (:class:`Logger`): A logger to use for
logging algorithm-specific metrics.
Returns:
``int`` or ``None``: exit code that is stored in :class:`Trace`
and made accessible for debugging.
"""
raise NotImplementedError(f'implement apply() required for {self.__class__.__name__}')
def __str__(self) -> str:
"""Returns the class name."""
return self.__class__.__name__
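# Illustrative sketch (not part of the library itself): a minimal subclass showing the
# two required overrides. ``Event.BEFORE_LOSS`` and ``state.epoch`` are taken from the
# docstring examples above; everything else is a hypothetical illustration.
#
# class MyAlgorithm(Algorithm):
#     def match(self, event: Event, state: State) -> bool:
#         return event == Event.BEFORE_LOSS and state.epoch > 30
#
#     def apply(self, event: Event, state: State, logger: Logger) -> Optional[int]:
#         # mutate ``state`` in place here (e.g. adjust the loss or the model inputs)
#         return 0  # optional exit code, stored in a Trace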
| [
"[email protected]"
]
| |
5531e802e6e0131bfab313bbb6fe0f400f8fc8d2 | 698cb8d24879fe75669af6f2667c3f88660a0a1e | /FM/deepfm/deepfm_movielens_sample.py | 4d5736c139d3a64e02b438bc0dbd2fbacb19ae68 | []
| no_license | HuichuanLI/Recommand-Algorithme | c83c5d34d75eebd127e2aef7abc8b7152fc54f96 | 302e14a3f7e5d72ded73b72a538596b6dc1233ff | refs/heads/master | 2023-05-11T03:01:30.940242 | 2023-04-30T08:03:19 | 2023-04-30T08:03:19 | 187,097,782 | 71 | 19 | null | null | null | null | UTF-8 | Python | false | false | 1,684 | py | import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from deepctr.models import DeepFM
from deepctr.inputs import SparseFeat,get_feature_names
#数据加载
data = pd.read_csv("movielens_sample.txt")
sparse_features = ["movie_id", "user_id", "gender", "age", "occupation", "zip"]
target = ['rating']
# 对特征标签进行编码
for feature in sparse_features:
lbe = LabelEncoder()
data[feature] = lbe.fit_transform(data[feature])
# 计算每个特征中的 不同特征值的个数
fixlen_feature_columns = [SparseFeat(feature, data[feature].nunique()) for feature in sparse_features]
print(fixlen_feature_columns)
linear_feature_columns = fixlen_feature_columns
dnn_feature_columns = fixlen_feature_columns
feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
# 将数据集切分成训练集和测试集
train, test = train_test_split(data, test_size=0.2)
train_model_input = {name:train[name].values for name in feature_names}
test_model_input = {name:test[name].values for name in feature_names}
# 使用DeepFM进行训练
model = DeepFM(linear_feature_columns, dnn_feature_columns, task='regression')
model.compile("adam", "mse", metrics=['mse'], )
history = model.fit(train_model_input, train[target].values, batch_size=256, epochs=1, verbose=True, validation_split=0.2, )
# 使用DeepFM进行预测
pred_ans = model.predict(test_model_input, batch_size=256)
# 输出RMSE或MSE
mse = round(mean_squared_error(test[target].values, pred_ans), 4)
rmse = mse ** 0.5
print("test RMSE", rmse) | [
"[email protected]"
]
| |
babcd86669606969ca94181114c3944258ecfa56 | 6bdb32ddbd72c4337dab12002ff05d6966538448 | /gridpack_folder/mc_request/LHEProducer/Spin-1/Wprime_WZ_WhadZlep/Wprime_WZ_WhadZlep_narrow_M2000_13TeV-madgraph_cff.py | aef83982aeb269928c449b90de344527b31a631c | []
| no_license | cyrilbecot/DibosonBSMSignal_13TeV | 71db480de274c893ba41453025d01bfafa19e340 | d8e685c40b16cde68d25fef9af257c90bee635ba | refs/heads/master | 2021-01-11T10:17:05.447035 | 2016-08-17T13:32:12 | 2016-08-17T13:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | import FWCore.ParameterSet.Config as cms
# link to cards:
# https://github.com/cms-sw/genproductions/tree/master/bin/MadGraph5_aMCatNLO/cards/production/13TeV/exo_diboson/Spin-1/Wprime_WZ_WhadZlep/Wprime_WZ_WhadZlep_narrow_M2000
externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.2.2/exo_diboson/Spin-1/Wprime_WZ_WhadZlep/narrow/v2/Wprime_WZ_WhadZlep_narrow_M2000_tarball.tar.xz'),
nEvents = cms.untracked.uint32(5000),
numberOfParameters = cms.uint32(1),
outputFile = cms.string('cmsgrid_final.lhe'),
scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
| [
"[email protected]"
]
| |
f82d94ad5533aa17f9c433b5546780f562802e2a | d1507ee333bf9453a197fe997b58871b527811bf | /venv/bin/automat-visualize | 51f0d1222abf19fd9b8ca755d742738686858191 | []
| no_license | hirossan4049/screenshare | a336f2cf0e0584866356a82f13683480d9d039f6 | 004f0e649116a6059af19d6489aeb13aed1741f3 | refs/heads/master | 2021-01-27T09:21:48.891153 | 2020-04-12T04:55:40 | 2020-04-12T04:55:40 | 243,476,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | #!/Users/linear/Documents/pg/pythonnnnn/screenshare/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from automat._visualize import tool
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(tool())
| [
"[email protected]"
]
| ||
7069d8dae75b1aa649b24c927694adb46dc57f3c | 732e1285934470ae04b20d64921a8cba20932875 | /neuedu_cnblogs_spider/pipelines.py | d19805a40bcea08c1a72fa65eb9c955cfba04a39 | []
| no_license | infant01han/neuedu_django_scrapy_es_cnblogs | 69ee11c7840b25b8ae6d37b21324389dfdacf371 | d293bae6ab5a7a360289afe35b7c3320dbce2dc8 | refs/heads/master | 2021-04-19T05:43:49.618157 | 2020-03-24T07:51:20 | 2020-03-24T07:51:20 | 249,584,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class NeueduCnblogsSpiderPipeline(object):
def process_item(self, item, spider):
item.save_to_es()
return item
| [
"[email protected]"
]
| |
5649179f8c1bb20ed44f3c4504259fd0c3f51967 | 3c868540c8f5b0b9b46440e9b8e9160de9e8988f | /ch06/handle_with_condition.py | fe8d59c97207d94fc31608b8c1b50584d2ba69ac | []
| no_license | sarte3/python | cc8f41b8b22b0a980252d6546358dd212324e2cd | 15d984e5df03387950692092b6b5569adab845bb | refs/heads/master | 2023-01-18T18:37:40.720326 | 2020-11-17T08:43:27 | 2020-11-17T08:43:27 | 304,824,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | user_input_a = input('정수 입력 > ')
if user_input_a.isdigit():
number_input_a = int(user_input_a)
print('원의 반지름 : ', number_input_a)
print('원의 둘레 : ', 2 * 3.14 * number_input_a)
print('원의 넓이 : ', 3.14 * number_input_a * number_input_a)
else:
print('정수를 입력하지 않았습니다') | [
"[email protected]"
]
| |
9922f2132d7a55e28ab30681e4779b4cd437e51a | 0a973640f0b02d7f3cf9211fcce33221c3a50c88 | /.history/src/easy-money_20210201120223.py | e793aeaa52c922c7f1eb6842bef7196a3a28ad87 | []
| no_license | JiajunChen123/IPO_under_review_crawler | 5468b9079950fdd11c5e3ce45af2c75ccb30323c | 031aac915ebe350ec816c05a29b5827fde588567 | refs/heads/main | 2023-02-26T08:23:09.622725 | 2021-02-04T10:11:16 | 2021-02-04T10:11:16 | 332,619,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,707 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# 东方财富网 首发申报
import re
import pickle
from datetime import datetime, timedelta
from urllib.parse import urlencode
import pandas as pd
import requests
import re
import time
from bs4 import BeautifulSoup
import configparser
config = configparser.ConfigParser()
config.read('Config.ini')
headers = config['eastmoney']['headers']
base_url = config['eastmoney']['base_url']
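# Hedged sketch of the Config.ini layout this script assumes -- the section and key
# names come from the lookups in this file, the values are placeholders only.
# Note that configparser returns every value as a plain string.
#
# [eastmoney]
# headers  = <request headers used for data.eastmoney.com>
# base_url = <EastMoney list API base URL, ending in '?' so urlencode(query) can be appended>
# raw_data = <path to eastmoney_raw_data.csv>
# zzsc_pkl = <path to the cached zzsc pickle>
# lastDate = <date of the last successful update>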
def date_gen():
r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',
headers=headers)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text, 'html.parser')
dateList = [i.text for i in soup.findAll('option')]
    return dateList
def update_date():
r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',
headers=headers)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text, 'html.parser')
newDate = soup.find('option').get_text()
return newDate
from pathlib import Path
def update_eastmoneyData(newDate):
eastmoney_raw_data = Path(config['eastmoney']['raw_data'])
# 如果文件存在,执行更新
if eastmoney_raw_data.is_file():
# newDate = update_date()
# 如果有更新
if newDate != config['eastmoney']['lastDate']:
query = {
'type': 'NS',
'sty': 'NSFR',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '5000',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': '1',
'fd': newDate,
'rt': '53721774'
}
url = base_url + urlencode(query)
rs = requests.get(url, headers=headers)
js = rs.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(js[:-1])
temp = [i.split(',') for i in data]
columns = [
'会计师事务所', '保荐代表人', '保荐机构', 'xxx', '律师事务所', '日期', '所属行业', '板块',
'是否提交财务自查报告', '注册地', '类型', '机构名称', '签字会计师', '签字律师', '时间戳', '简称'
]
df = pd.DataFrame(temp, columns=columns)
df['文件链接'] = df['时间戳'].apply(
lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
)
df = df[[
'机构名称', '类型', '板块', '注册地', '保荐机构', '保荐代表人', '律师事务所', '签字律师', '会计师事务所',
'签字会计师', '是否提交财务自查报告', '所属行业', '日期', 'xxx', '时间戳', '简称', '文件链接'
]]
df = df[df['板块'] != '创业板']
df.replace({'是否提交财务自查报告': ' '}, '是')
df.replace({'是否提交财务自查报告': '不适用'}, '是')
df['机构名称'] = df['机构名称'].replace(r'\*', '', regex=True)
df['机构名称'] = df['机构名称'].replace(r'股份有限公司', '', regex=True)
df.to_csv(
'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_raw_data.csv',mode='a',
index=False, header=False, encoding='utf-8-sig')
else:
dateList = date_gen()
        df = get_eastmoneyData(dateList)
return df
def get_eastmoneyData(dateList):
query = {
'type': 'NS',
'sty': 'NSFR',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '5000',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': '1',
'rt': '53721774'
}
main_data = []
for date in dateList:
print('fetching date: ',date)
query['fd'] = date
# start = datetime.strptime('2017-01-05','%Y-%m-%d').date()
# while start < datetime.today().date():
# query['fd'] = start
url = base_url + urlencode(query)
# yield url
# start += timedelta(days=7)
rs = requests.get(url, headers=headers)
if rs.text == '':
continue
js = rs.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(js[:-1])
main_data.extend(data)
time.sleep(2)
temp = [i.split(',') for i in main_data]
columns = [
'会计师事务所', '保荐代表人', '保荐机构', 'xxx', '律师事务所', '日期', '所属行业', '板块',
'是否提交财务自查报告', '注册地', '类型', '机构名称', '签字会计师', '签字律师', '时间戳', '简称'
]
df = pd.DataFrame(temp, columns=columns)
df['文件链接'] = df['时间戳'].apply(
lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
)
df = df[[
'机构名称', '类型', '板块', '注册地', '保荐机构', '保荐代表人', '律师事务所', '签字律师', '会计师事务所',
'签字会计师', '是否提交财务自查报告', '所属行业', '日期', 'xxx', '时间戳', '简称', '文件链接'
]]
df = df[df['板块'] != '创业板']
df.replace({'是否提交财务自查报告': ' '}, '是')
df.replace({'是否提交财务自查报告': '不适用'}, '是')
df['机构名称'] = df['机构名称'].replace(r'\*', '', regex=True)
df['机构名称'] = df['机构名称'].replace(r'股份有限公司', '', regex=True)
df = df[df['板块'] != '创业板']
df.to_csv(
'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_raw_data.csv',
index=False,
encoding='utf-8-sig')
return df
def get_meetingData():
meetingInfo = []
for marketType in ['2', '4']: # 2 为主板, 4 为中小板
query = {
'type': 'NS',
'sty': 'NSSH',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '5000',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': marketType,
'rt': '53723990'
}
url = base_url + urlencode(query)
rss = requests.get(url, headers=headers)
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
meetingInfo.extend(data)
temp = [j.split(',') for j in meetingInfo]
columns = [
'时间戳', 'yyy', '公司代码', '机构名称', '详情链接', '申报日期', '上会日期', '申购日期', '上市日期',
'9', '拟发行数量', '发行前总股本', '发行后总股本', '13', '占发行后总股本比例', '当前状态', '上市地点',
'主承销商', '承销方式', '发审委委员', '网站', '简称'
]
df = pd.DataFrame(temp, columns=columns)
df['文件链接'] = df['时间戳'].apply(
lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
)
df['详情链接'] = df['公司代码'].apply(
lambda x: "data.eastmoney.com/xg/gh/detail/" + x + ".html")
df = df[[
'机构名称', '当前状态', '上市地点', '拟发行数量', '申报日期', '上会日期', '申购日期', '上市日期',
'主承销商', '承销方式', '9', '发行前总股本', '发行后总股本', '13', '占发行后总股本比例', '发审委委员',
'网站', '公司代码', 'yyy', '时间戳', '简称', '详情链接', '文件链接'
]]
df.to_csv(
'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_data_meeting.csv',
index=False,
encoding='utf-8-sig')
return df
def update_zzscDate(newDate):
    if Path(config['eastmoney']['zzsc_pkl']).is_file():
        if newDate != config['eastmoney']['lastDate']:
            # pickle.load needs an open file object rather than a path string
            with open(config['eastmoney']['zzsc_pkl'], 'rb') as f:
                zzsc_dict = pickle.load(f)
query = {
'type': 'NS',
'sty': 'NSSE',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '500',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': '4',
'stat': 'zzsc',
'fd': newDate,
'rt': '53727636'
}
url = base_url + urlencode(query)
rss = requests.get(url, headers=headers)
if rss.text == 'var IBhynDx={pages:0,data:[{stats:false}]}':
return
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
for i in data:
name = i.split(',')[1]
if name not in zzsc_dict:
zzsc_dict[name] = i.split(',')[2]
else:
continue
else:
date = g
def get_zzscData(dateList):
zzsc_dict = {}
for date in dateList:
query = {
'type': 'NS',
'sty': 'NSSE',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '500',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': '4',
'stat': 'zzsc',
'fd': date,
'rt': '53727636'
}
url = base_url + urlencode(query)
rss = requests.get(url, headers=headers)
if rss.text == 'var IBhynDx={pages:0,data:[{stats:false}]}':
continue
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
for i in data:
name = i.split(',')[1]
if name not in zzsc_dict:
zzsc_dict[name] = i.split(',')[2]
else:
continue
time.sleep(2)
zzsc = pd.DataFrame(zzsc_dict.items(), columns=['机构名称', '决定终止审查时间'])
zzsc.to_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_zzsc.csv',
encoding='utf-8-sig',
index=False)
return zzsc
def eastmoney_cleanUP():
east_money = pd.read_csv(
'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_raw_data.csv')
east_money.replace({'是否提交财务自查报告': ' '}, '是')
east_money.replace({'是否提交财务自查报告': '不适用'}, '是')
east_money['机构名称'] = east_money['机构名称'].replace(r'\*', '', regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'股份有限公司', '', regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'\(', '(', regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'\)', ')', regex=True)
east_money = east_money[east_money['板块'] != '创业板']
# east_money.sort_values(['机构名称','类型','受理日期'],ascending=[True, True,True],inplace=True)
# east_money.to_csv('C:/Users/chen/Desktop/IPO_info/pre_cleab.csv',encoding='utf-8-sig',index=False)
east_money.drop_duplicates(subset=['机构名称', '类型'],
keep='first',
inplace=True)
east_money.to_csv(
'C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_data_cleaned.csv',
encoding='utf-8-sig',
index=False)
return east_money
def gen_finalData(cleaned_easymoney_df, meetingInfo_df, zzsc_df):
'''
主板、中小板 = {'机构名称':'',
'简称':'',
'Wind代码':'',
'统一社会信用代码':'',
'板块':'',
'注册地':'',
'所属行业':'',
'经营范围':'',
'预先披露':'[日期]',
'已反馈':'[日期]',
'预先披露更新':'[日期]',
'发审会':{'中止审查':'[日期]',
'已上发审会,暂缓表决':'[日期]',
                              '已提交发审会讨论,暂缓表决':'[日期]',
'已通过发审会':'[日期]'},
'终止审查':'[日期]',
'上市日期':'[日期]',
'保荐机构':'',
'律师事务所':,
'会计师事务所':'',
'发行信息':{'拟发行数量':'',
'发行前总股本':'',
'发行后总股本':''},
'反馈文件':'[链接]'
}
'''
shzb = {} # 上海主板
szzxb = {} # 深圳中小板
all_data = {} # 总数据
ekk = cleaned_easymoney_df.values.tolist()
for i in ekk:
if i[0] not in all_data:
all_data[i[0]] = {
'机构名称': i[0] + '股份有限公司',
'简称': i[15],
'Wind代码': '',
'统一社会信用代码': '',
'板块': i[2],
'注册地': '',
'所属行业': '',
'经营范围': '',
'预先披露': '',
'已反馈': '',
'预先披露更新': '',
'发审会': {
'中止审查': '',
'已上发审会,暂缓表决': '',
'已提交发审会讨论,暂缓表决': '',
'已通过发审会': ''
},
'终止审查': '',
'上市日期': '',
'保荐机构': i[4],
'保荐代表人': '',
'律师事务所': i[6],
'签字律师': '',
'会计师事务所': i[8],
'签字会计师': '',
'发行信息': {
'拟发行数量(万)': '',
'发行前总股本(万)': '',
'发行后总股本(万)': ''
},
'反馈文件': ''
}
if i[1] == '已受理':
all_data[i[0]]['预先披露'] = i[12]
elif i[1] == '已反馈':
all_data[i[0]]['已反馈'] = i[12]
elif i[1] == '预先披露更新':
all_data[i[0]]['预先披露更新'] = i[12]
elif i[1] == '已通过发审会':
all_data[i[0]]['发审会']['已通过发审会'] = i[12]
elif i[1] == '已提交发审会讨论,暂缓表决':
all_data[i[0]]['发审会']['已提交发审会讨论,暂缓表决'] = i[12]
elif i[1] == '已上发审会,暂缓表决':
all_data[i[0]]['发审会']['已上发审会,暂缓表决'] = i[12]
elif i[1] == '中止审查':
all_data[i[0]]['发审会']['中止审查'] = i[12]
if all_data[i[0]]['注册地'] == '' and i[3] != '':
all_data[i[0]]['注册地'] = i[3]
if all_data[i[0]]['所属行业'] == '' and i[11] != '':
all_data[i[0]]['所属行业'] = i[11]
if all_data[i[0]]['保荐代表人'] == '' and i[5] != '':
all_data[i[0]]['保荐代表人'] = i[5]
if all_data[i[0]]['签字律师'] == '' and i[7] != '':
all_data[i[0]]['签字律师'] = i[7]
if all_data[i[0]]['签字会计师'] == '' and i[9] != '':
all_data[i[0]]['签字会计师'] = i[9]
ekk2 = meetingInfo_df.values.tolist()
error_set = {}
for i in ekk2:
i[0] = i[0].replace(r'股份有限公司', '')
if i[0] not in all_data:
print("Error: Cannot find ", i[0])
error_set.update({i[0]: i[5]})
continue
if i[1] == '上会未通过':
all_data[i[0]]['发审会']['上会未通过'] = i[5]
elif i[1] == '取消审核':
all_data[i[0]]['发审会']['取消审核'] = i[5]
elif i[1] == '上会通过':
all_data[i[0]]['发审会']['已通过发审会'] = i[5]
if i[7] != '':
            all_data[i[0]]['上市日期'] = i[7]
        all_data[i[0]]['发行信息']['拟发行数量(万)'] = "{:.2f}".format(int(i[3]) / 10000)
        all_data[i[0]]['发行信息']['发行前总股本(万)'] = "{:.2f}".format(int(i[11]) / 10000)
        all_data[i[0]]['发行信息']['发行后总股本(万)'] = "{:.2f}".format(int(i[12]) / 10000)
ekk3 = zzsc_df.values.tolist()
for i in ekk3:
name = i[0].replace(r'股份有限公司', '')
if name not in all_data:
print("Error: Cannot find in zzsc", i[0])
error_set.update({name: i[1]})
continue
all_data[name]['终止审查'] = i[1]
# for key, value in all_data.items():
# if value['板块'] == '中小板' and value['终止审查'] == '' and value['上市日期'] == '':
# szzxb.update({key: value})
# if value['板块'] == '主板企业' and value['终止审查'] == '' and value['上市日期'] == '':
# shzb.update({key: value})
return all_data, error_set
if __name__ == '__main__':
# dateList = date_gen()
# get_eastmoneyData(dateList)
east_money_df = eastmoney_cleanUP()
# east_money_df = pd.read_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/easymoney_data_new.csv',keep_default_na=False)
meetingInfo_df = pd.read_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/eastmoney_data_meeting.csv',keep_default_na=False)
# meetingInfo_df = get_meetingData()
zzsc_df = pd.read_csv('C:/Users/chen/Desktop/IPO_info/EastMoney/zzsc.csv')
all_data,_ = gen_finalData(east_money_df,meetingInfo_df,zzsc_df)
print('Complete!')
with open('C:/Users/chen/Desktop/IPO_info/zb_zxb_info.pkl','wb') as f:
pickle.dump(all_data, f, pickle.HIGHEST_PROTOCOL) | [
"[email protected]"
]
| |
250f31b763d02f2dba25473438a3e6fdcc71ebc9 | 55a9b1b294d5a402c63848f9f7386e3bf93645da | /docker/src/clawpack-5.3.1/pyclaw/src/petclaw/tests/test_io.py | 56c544ed1ff6d6cd39629552d19d32f8513d88d9 | [
"LicenseRef-scancode-public-domain",
"CC-BY-4.0",
"MIT",
"BSD-3-Clause"
]
| permissive | geohackweek/visualization | b606cfade5d31f59cc38602df05930aed6e19b17 | 5d29fa5b69d69ee5c18ffaef2d902bd51f5807c8 | refs/heads/gh-pages | 2021-01-21T13:34:44.622039 | 2019-09-06T23:28:08 | 2019-09-06T23:28:08 | 68,648,198 | 11 | 13 | NOASSERTION | 2019-09-06T23:28:09 | 2016-09-19T21:27:33 | Jupyter Notebook | UTF-8 | Python | false | false | 509 | py | from clawpack import pyclaw
from clawpack import petclaw
import os
class PetClawIOTest(pyclaw.IOTest):
@property
def solution(self):
return petclaw.Solution()
@property
def file_formats(self):
return ['hdf5']
@property
def this_dir(self):
return os.path.dirname(os.path.abspath(__file__))
@property
def test_data_dir(self):
return os.path.join(self.this_dir, '../../pyclaw/tests/test_data')
def test_io_from_binary(self):
return | [
"[email protected]"
]
| |
b5719efc41c1787dbdbf3f5fd14e1e331769b2cf | 55a4d7ed3ad3bdf89e995eef2705719ecd989f25 | /main/law/spark_short/spark_short_limai_and_wenshu_origin/lawlist_to_lawid_2018-05-10_imp_other_etl_online.py | e9734a7e27e63e8f7b1081c614d979c3b4078dbe | []
| no_license | ichoukou/Bigdata | 31c1169ca742de5ab8c5671d88198338b79ab901 | 537d90ad24eff4742689eeaeabe48c6ffd9fae16 | refs/heads/master | 2020-04-17T04:58:15.532811 | 2018-12-11T08:56:42 | 2018-12-11T08:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,190 | py | # -*- coding: utf-8 -*-
from pyspark import SparkContext,SparkConf
from pyspark.sql import SQLContext
from pyspark.sql.types import *
import re
def p(x):
if x[1]:
print type(x)
print x
# print x[1]
# exit(0)
def filter_(x):
if x[1] and x[1] != '': #过滤掉数据库中,lawlist为Null或''的行。
return True
return False
def get_uuids(uuids):
l = []
for x in uuids:
l.append(x) #将分组结果ResultIterable转换为List
return "||".join(l) #列表不能直接存入Mysql
def get_lawlist_ids(uuid_ids):
uuid,ids = uuid_ids[0],uuid_ids[1]
lawlist_id = []
for x in ids:
lawlist_id.append(x)
return (uuid,"||".join(lawlist_id))
def get_title_short_id(x): #保证lawlist和law_id的有序!
k = x[0] + "|" + x[1]
v = str(x[2])
return (k,v)
if __name__ == "__main__":
conf = SparkConf()
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
# sc.setLogLevel("ERROR") # ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
# lawlist = sqlContext.read.jdbc(url='jdbc:mysql://cdh-slave1:3306/civil', table='uuid_reason_lawlist',column='id',lowerBound=0,upperBound=100000,numPartitions=70,properties={"user": "root", "password": "HHly2017."})
lawlist_id = sqlContext.read.jdbc(url='jdbc:mysql://cdh-slave1:3306/laws_doc_v3', table='(select id,title_short,art_num,lawlist_id from law_rule_result2) tmp',column='id',lowerBound=1,upperBound=2881160,numPartitions=30,properties={"user": "weiwc", "password": "HHly2017."})
# lawlist= sqlContext.read.jdbc(url='jdbc:mysql://cdh-slave1:3306/civil', table='uuid_reason_lawlist',predicates=["id >= 1 and id <= 100"],properties={"user": "root", "password": "HHly2017."})
lawlist= sqlContext.read.jdbc(url='jdbc:mysql://cdh-slave1:3306/laws_doc_imp_other', table='(select id,uuid,lawlist from imp_other_etl ) tmp2',column='id',lowerBound=1,upperBound=4733848,numPartitions=108,properties={"user": "weiwc", "password": "HHly2017."})
def etl_lawlist(p1, p2, lawlist):
if lawlist and lawlist.strip() != '':
# if not (lawlist.strip().startswith("[") and lawlist.strip().endswith("]")): # 去掉前后的所有"
r1 = re.findall(ur'"{0,5}\["{0,5}', lawlist.strip())
r2 = re.findall(ur'"{0,5}\]"{0,5}', lawlist.strip())
if r1 and r2:
start = r1.pop(0)
end = r2.pop()
lawlist = lawlist.strip().replace(start, "").replace(end, "")
# l = list(eval(lawlist.strip())) #有脏数据不能直接使用eval()
l = lawlist.split('", "') #lawlist类似于:《最高人民法院关于审理建设工程施工合同纠纷案件适用法律问题的解释》第三条", "《中华人民共和国合同法》第九十七条", "最高人民法院关于审理建设工程施工合同纠纷案件适用法律问题的解释》第十条", "《中华人民共和国合同法》第九十八条
if l:
tl = []
for i in l:
r1 = re.split(p2, i)
if len(r1) > 2: #确保既有《,又有》
r2 = re.search(p1, r1[2])
if r2: #判断是否找到了条
tl.append(r1[1] + "|" + r2.group(0))
return list(set(tl)) # 去重
return []
return []
return []
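    # Example of what etl_lawlist produces, traced from the logic above (illustration only):
    #   etl_lawlist(p1, p2, u'《中华人民共和国合同法》第九十七条", "《中华人民共和国合同法》第九十八条')
    #   -> [u'中华人民共和国合同法|第九十七条', u'中华人民共和国合同法|第九十八条']  (order not guaranteed: set() dedupes)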
lawlist_id2 = lawlist_id.select('title_short','art_num','lawlist_id').map(lambda x:get_title_short_id(x))
p1 = ur'\u7b2c[\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d\u5341\u767e\u5343]{1,10}\u6761'
p2 = ur'[\u300a\u300b]' # 按《》切分
c = lawlist.select('uuid','lawlist').map(lambda x:(x[0],x[1])).flatMapValues(lambda x: etl_lawlist(p1, p2, x)).filter(filter_).map(lambda x: (x[1], x[0])).groupByKey().mapValues(lambda v: get_uuids(v))
# flatMapValues(lambda x: etl_lawlist(p1, p2, x)).filter(filter_).map(lambda x: (x[1].encode("utf-8"), x[0]))
# groupByKey().mapValues(lambda v: get_uuids(v))
# filter(filter_).map(lambda x: (x[1].encode("utf-8"), x[0])).groupByKey().mapValues(lambda v: get_uuids(v))
# print str(c.count()) + "======================"
# c.foreach(p)
lawlist_title_id_result = lawlist_id2.join(c).map(lambda x:x[1]).filter(filter_).flatMapValues(lambda x:(x.split("||"))).map(lambda x:(x[1],x[0])).groupByKey().map(lambda x:(get_lawlist_ids(x)))
schema = StructType([StructField("uuid", StringType(), False),StructField("law_id", StringType(), True)])
f = sqlContext.createDataFrame(lawlist_title_id_result, schema=schema)
# , mode = "overwrite"
# useUnicode = true & characterEncoding = utf8,指定写入mysql时的数据编码,否则会乱码。
# print str(f.count()) + "======================"
f.write.jdbc(url='jdbc:mysql://cdh-slave1:3306/laws_doc_imp_other?useUnicode=true&characterEncoding=utf8', table='imp_other_uuid_law_id',properties={"user": "weiwc", "password": "HHly2017."})
sc.stop() | [
"[email protected]"
]
| |
265a5e2c314e412b545f2390b981e49d3b9d7a25 | 09ae3f372d1000f118ad80874870ae420a4be66f | /scikit-learn-master/examples/compose/plot_digits_pipe.py | c5b0fb2a136094f0d16c180883cdcc3175896a9d | [
"Apache-2.0",
"BSD-3-Clause"
]
| permissive | lqkweb/learnMLflow | 998f80c3828879b8d542125bc95c6345b8e9b29a | 13c5decaebba95b1b90f92021be35e343b4764af | refs/heads/master | 2022-10-18T06:17:23.584172 | 2019-01-18T09:51:38 | 2019-01-18T09:51:38 | 166,145,472 | 2 | 0 | Apache-2.0 | 2022-09-30T18:26:17 | 2019-01-17T02:22:29 | Python | UTF-8 | Python | false | false | 2,395 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# Define a pipeline to search for the best combination of PCA truncation
# and classifier regularization.
logistic = SGDClassifier(loss='log', penalty='l2', early_stopping=True,
max_iter=10000, tol=1e-5, random_state=0)
pca = PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Parameters of pipelines can be set using ‘__’ separated parameter names:
param_grid = {
'pca__n_components': [5, 20, 30, 40, 50, 64],
'logistic__alpha': np.logspace(-4, 4, 5),
}
search = GridSearchCV(pipe, param_grid, iid=False, cv=5)
search.fit(X_digits, y_digits)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)
# Plot the PCA spectrum
pca.fit(X_digits)
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))
ax0.plot(pca.explained_variance_ratio_, linewidth=2)
ax0.set_ylabel('PCA explained variance')
ax0.axvline(search.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
ax0.legend(prop=dict(size=12))
# For each number of components, find the best classifier results
results = pd.DataFrame(search.cv_results_)
components_col = 'param_pca__n_components'
best_clfs = results.groupby(components_col).apply(
lambda g: g.nlargest(1, 'mean_test_score'))
best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score',
legend=False, ax=ax1)
ax1.set_ylabel('Classification accuracy (val)')
ax1.set_xlabel('n_components')
plt.tight_layout()
plt.show()
| [
"[email protected]"
]
| |
4661f874d007a11754a46c3beedde6041690f9e9 | f6fafa5ade66f3168a4c8960389d6fb75539cf9b | /authmobile/views.py | c589d37ac540e48d45157b3ada270cf700ef5c9a | []
| no_license | tokibito/nullpobug-mobile-twitter-client | 7fc6593bd086017eaa7fad96f60efa43193ff526 | dbfb75a16d4020f471187bb1398e06ef42fc9862 | refs/heads/master | 2020-07-25T07:39:49.730289 | 2009-07-23T07:27:06 | 2009-07-23T07:27:06 | 208,217,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | # vim:fileencoding=utf8
from django.http import HttpResponseBadRequest, HttpResponseRedirect
from django.conf import settings
from django.views.generic.simple import direct_to_template
from django.contrib.auth import authenticate, login
from django.core.urlresolvers import reverse
from authmobile.models import MobileUser
def login_easy(request):
"""
かんたんログイン
"""
if request.agent.is_nonmobile():
return HttpResponseBadRequest(u'モバイル端末でアクセスしてください')
# サブスクライバーIDを取得
if request.agent.is_docomo():
guid = request.agent.guid
else:
guid = request.agent.serialnumber
user = authenticate(subscriber_id=guid)
if not user:
return direct_to_template(request, 'authmobile/error.html', extra_context={
'message': u'ユーザが見つかりません。',
})
login(request, user)
return HttpResponseRedirect(reverse('site_index'))
| [
"[email protected]"
]
| |
229128424c9b4cb65c8e0638f4b143ddde03708d | eab5f1c8292a76babb0e1b86471db954ac0d1a41 | /guvi90.py | ea2fae62f19a846b8a998e9be1d931e0daa5d69e | []
| no_license | pavanimallem/pavs3 | 56cabfa7cc56c650746cbf80296e6fe32578f953 | c43e6b2993317c438554bbcae304eb6aa6763801 | refs/heads/master | 2020-03-24T07:26:10.827892 | 2018-09-19T11:18:24 | 2018-09-19T11:18:24 | 142,564,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | str=raw_input()
x=[]
for i in str:
if(i.isdigit()):
x.append(i)
print "".join(x)
| [
"[email protected]"
]
| |
46a68cf8d816140c27a6905b438ef3b5390e2390 | 29ecf78ebd8fe26409db20f5a5ccbf40a0b7bf77 | /posts/tests/test_api_views.py | 10d12405755f41f59f77e32766cef9f8a3457530 | []
| no_license | pranavchandran/Django-Tests-unleashed | 56225d1cdd6cca58df4e0fffec33b3d36cabbad7 | dc76e6b87cea7842388cd90bbd5a45c563e4af3f | refs/heads/master | 2022-09-29T11:11:10.517822 | 2020-06-10T06:21:29 | 2020-06-10T06:21:29 | 271,107,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | from rest_framework.test import APIRequestFactory,force_authenticate
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.contrib.auth.models import AnonymousUser,User
from posts.models import Post
from posts.api.views import (
PostCreateAPIView,
PostDeleteAPIView,
PostDetailAPIView,
PostListAPIView,
PostUpdateAPIView,
)
# User = get_user_model
class PostApiTest(TestCase):
def setUp(self):
self.data = {"title":"coming days","content":"time is","publish":timezone.now().date()}
self.factory = APIRequestFactory()
self.user = User.objects.create(
username='test1', email='[email protected]', password='top_secret',
is_staff=True,is_superuser=True)
def create_post(self,title='crucial'):
return Post.objects.create(title=title)
def test_get_data(self):
list_url = reverse("posts-api:list")
obj =self.create_post()
detail_url = reverse('posts-api:detail',kwargs={'slug':obj.slug})
request = self.factory.get(list_url)
response = PostListAPIView.as_view()(request)
self.assertEqual(response.status_code,200)
request = self.factory.get(detail_url)
response = PostDetailAPIView.as_view()(request,slug=obj.slug)
self.assertEqual(response.status_code,200)
def test_post_data(self):
create_url = reverse("posts-api:create")
request = self.factory.post(create_url,data=self.data)
response1 = PostCreateAPIView.as_view()(request)
self.assertEqual(response1.status_code,401)
force_authenticate(request,user=self.user)
response = PostCreateAPIView.as_view()(request)
self.assertEqual(response.status_code,201)
def test_update_data(self):
obj = self.create_post()
update_url = reverse("posts-api:update",kwargs={"slug":obj.slug})
request = self.factory.put(update_url,data=self.data)
# print(request)
response1 = PostUpdateAPIView.as_view()(request,slug=obj.slug)
self.assertEqual(response1.status_code,401)
force_authenticate(request,user=self.user)
response = PostUpdateAPIView.as_view()(request,slug=obj.slug)
self.assertEqual(response.status_code,200)
def test_delete_data(self):
obj = self.create_post()
delete_url = reverse("posts-api:delete",kwargs={"slug":obj.slug})
request = self.factory.delete(delete_url)
print(request)
response1 = PostDeleteAPIView.as_view()(request,slug=obj.slug)
self.assertEqual(response1.status_code,401)
force_authenticate(request,user=self.user)
response = PostDeleteAPIView.as_view()(request,slug=obj.slug)
self.assertEqual(response.status_code,204) | [
"[email protected]"
]
| |
aea3200a6cf1ceec2a12eac766f221b4f85cb99d | 03415e25427d9a17bada8fd75daadc45c093c377 | /LST_Collect.py | 7cf76c296c1d1f4a78e7ce9e9b0fd9243fd117e1 | []
| no_license | mwilensky768/MJW-HERA | 472d639bd4086a31be112564be9b2b22e70e3e86 | da1710a17123cc3ccd3e318e224712eb80bcb3bd | refs/heads/master | 2021-08-10T22:32:15.391270 | 2017-11-13T01:45:48 | 2017-11-13T01:45:48 | 108,204,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | import pyuvdata
import glob
import numpy as np
from math import pi
inpath = '/data6/HERA/data/2458042/zen.2458042.'
pathlist = glob.glob(inpath + '*.xx*.uv')
obslist = np.sort(np.array([int(path[path.find('zen.') + 12:path.find('.xx')])
for path in pathlist]))
pathlist_sort = [inpath + str(obs) + '.xx.HH.uv' for obs in obslist]
UV = pyuvdata.UVData()
LST = []
for path in pathlist_sort:
UV.read_miriad(path)
LST.append(UV.lst_array[0])
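# UV.lst_array holds the local sidereal time in radians; the factor 23.934 / (2 * pi)
# below rescales it to hours (23.934 h is roughly the length of one sidereal day).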
np.save('/data4/mwilensky/GS_LST.npy', np.array(LST) * 23.934 / (2 * pi))
| [
"[email protected]"
]
| |
c175141ce719e09b6cea9f37d217223ff7d6033a | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/H/hitkarsh/karnataka_2.py | 6c81472eb4a4013c05dd3d24a663158f61abd084 | []
| no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,692 | py | import scraperwiki
import mechanize # added by Usha
import re # added by Usha
import lxml.html
url="http://censusindia.gov.in/Census_Data_2001/Village_Directory/List_of_Villages/List_of_Villages_Alphabetical.aspx?cki=&State_Code=29"
import string
#create list of upper case alphabets
l=list(string.ascii_uppercase)
#create list 1-35
l1=list(range(1,36))
l2=[]
s_no=0
#convert numbers in l2 to string
for i in l1:
l2.append(str(i))
#append a 0 for single digit numbers
for i in range(10):
l2[i]='0'+l2[i]
state_count=0
c=1
data=[]
#run loop for all state and union territories
#while state_count<35:
while state_count<1:
#add state code to the url
#url1=url+l2[state_count]+"&SearchKey="
url1=url+"&SearchKey="
state_count+=1
count=16
l_c=0
#data=[]
row=[]
#run loop for alphabets
while count<26:
#while count<2:
#add search alphabet to the url
url2=url1+l[count]
# code added by Usha Nair
br = mechanize.Browser()
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
response = br.open(url2)
VAR1 = response.read() #reads the source file for the web page
br.select_form(nr=0)
br.set_all_readonly(False)
mnext = re.search("""<a id="lnkShowAll" href="javascript:__doPostBack\('(.*?)','(.*?)'\)" style="font-family:Verdana;font-size:Smaller;">Show All""", VAR1)
if not mnext:
count+=1
continue
br["__EVENTTARGET"] = mnext.group(1)
br["__EVENTARGUMENT"] = mnext.group(2)
#br.find_control("btnSearch").disabled = True
response = br.submit()
VAR2 = response.read() # source code after submitting show all
print "response"
print response
print "VAR2"
print VAR2
# Usha Nair till here
#html = scraperwiki.scrape(url2)
#root = lxml.html.fromstring(html)
root = lxml.html.fromstring(VAR2)
count+=1
#select div where data exists
for el in root.cssselect("div#printarea td"):
#select appropriate table row
for el2 in el.cssselect("tr.GridAlternativeRows td"):
if l_c<4:
row.append(el2.text_content())
l_c+=1
else:
row.append(el2.text_content())
l_c=0
data.append(row)
#save to data base
scraperwiki.sqlite.save(unique_keys=["sl_no"], data={"sl_no":s_no,"village_name":row[1], "village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
s_no+=1
row=[]
#select appropriate table row
for el2 in el.cssselect("tr.GridRows td"):
if l_c<4:
row.append(el2.text_content())
l_c+=1
else:
row.append(el2.text_content())
l_c=0
data.append(row)
#save to data base
scraperwiki.sqlite.save(unique_keys=["sl_no"], data={"sl_no":s_no,"village_name":row[1], "village_code":row[2],"Sub_district_Name":row[3],"District_Name":row[4]})
s_no+=1
row=[]
print "completed scrapping"
| [
"[email protected]"
]
| |
1e069e901a9d931704594b568c24eb89ab3392b6 | 541fed374b1d1ebff33c42496db84337e06177b6 | /City.py | 4e62852ace82662a8167ce02a534b5b510013eba | []
| no_license | huangruihaocst/tsp-genetic | faaa6654459cfce521f936bd31c5438c19f8d250 | 794be023d698fca41caf797810feb44a0024cdea | refs/heads/master | 2020-03-21T17:47:49.697351 | 2016-05-03T04:42:06 | 2016-05-03T04:42:06 | 138,854,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | class City:
def __init__(self, name, x, y):
self.name = name
self.x = x
self.y = y
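# Illustrative usage (made-up values):
#   a = City('A', 0.0, 0.0)
#   b = City('B', 3.0, 4.0)
#   euclidean = ((a.x - b.x) ** 2 + (a.y - b.y) ** 2) ** 0.5  # -> 5.0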
| [
"[email protected]"
]
| |
a6c789b7be6e47e5a363cd0cc4b9e9d846ce4005 | b3b443f0bc49bbb10c26b51fe89e6860d4ca3d3a | /ctreport_selenium/ctreport_html/scripts/detailmodal.py | d3f7cf88a94f4e60fc79f4cc43686715a63414b6 | [
"MIT"
]
| permissive | naveens33/ctreport-selenium | 6b3a1cc93a6741a1d493c2452c1cf56c6d85c052 | 9553b5c4b8deb52e46cf0fb3e1ea7092028cf090 | refs/heads/master | 2022-12-23T04:55:12.226339 | 2020-08-29T19:22:00 | 2020-08-29T19:22:00 | 228,779,087 | 2 | 2 | MIT | 2022-12-18T22:53:51 | 2019-12-18T07:03:39 | Python | UTF-8 | Python | false | false | 5,082 | py | def content(var):
c = '''
<script>
function createmodal(id) {
''' + var + '''
var content = '<table class="table table-bordered ">';
var footer = ''
if(Array.isArray(tests[id])){
content += '<tbody>\
<tr class="table-secondary"><td>Expected</td></tr>\
<tr class="align-middle">';
content += '<td>'+tests[id][0].join(", ")+'</td></tr>\
<tr class="table-secondary"><td>Actual</td></tr>\
<tr class="align-middle">';
content += '<td>'+tests[id][1].join(", ")+'</td></tr>';
}
else{
content += '<thead class="thead-light">\
<tr>\
<th class="align-middle text-sm-center">Status</th>\
<th class="align-middle text-sm-center">Key</th>\
<th class="align-middle text-sm-center">Expected</th>\
<th class="align-middle text-sm-center">Actual</th>\
</tr>\
</thead>\
<tbody>';
for(var key in tests[id]) {
status =''
key_='<td>'+key+'</td>'
expected='<td>'+tests[id][key][0]+'</td>';
actual='<td>'+tests[id][key][1]+'</td>';
if (tests[id][key][2]=='true'){
status='<i class="fa fa-check-circle align-middle text-sm-center" style="color:#00AF00; font-size: 18px;"></i>';
}
else{
status='<i class="fa-times-circle fa align-middle text-sm-center" style="color:#F7464A; font-size: 18px;"></i>';
if (tests[id][key][0]=="null"){
key_ = '<td style="background-color:rgb(247, 131, 134,0.3);">'+key+'</td>'
expected='<td></td>';
}
else if(tests[id][key][1]=="null"){
actual='<td style="color:#F7464A;">\
<i class="fas fa-ban" data-toggle="tooltip" data-placement="right" data-original-title="Key missing in actual data"></i>\
</td>';
}
else{
actual='<td style="background-color: #ffffb2">'+tests[id][key][1]+'</td>';
}
}
content += '<tr class="align-middle text-sm-center">\
<td>\
'+status+'\
</td>\
'+key_+'\
'+expected+'\
'+actual+'\
</tr>';
footer = '<div class="row">\
<div class="col-2"><i class="fas fa-square-full border border-secondary" style="color: #ffffb2"></i></div>\
<div class="col-10">\Actual is not same as Expected</div>\
</div>\
<div class="row">\
<div class="col-2"><i class="fas fa-square-full border border-secondary" style="color:rgb(247, 131, 134,0.3);"></i></div>\
<div class="col-10">New key found in actual</div>\
</div>\
<div class="row">\
<div class="col-2"><i class="fas fa-ban" style="color:#F7464A;"></i></div>\
<div class="col-10">Key missing in actual data</div>\
                                        </div>';
}
}
content += '</tbody>\
</table>';
var header = "Expected vs Actual";
var html = '<div id="modalWindow" class="modal" data-keyboard="false" data-backdrop="static">';
html += '<div class="modal-dialog modal-dialog-scrollable ">\
<div class="modal-content">\
<div class="modal-header">\
<button type="button" id="closeModal" class="btn btn-danger" data-dismiss="modal" onclick=deletemodal("modalWindow") style="margin:auto 1rem auto auto; font-size: smaller;">Close</button>\
</div>\
<div class="modal-body">'
+content+'\
</div>\
<div class="modal-footer small">'\
+footer+'\
</div>\
</div>\
</div>\
</div>';
$("#myModal").html(html);
$("#modalWindow").modal();
}
function deletemodal(id) {
var element = document.getElementById(id);
element.parentNode.removeChild(element);
};
</script>
'''
return c
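# Illustrative usage (the payload below is an assumption, not part of the library):
#   script = content('var tests = {"c1": {"name": ["expected", "actual", "false"]}};')
# The returned <script> block defines createmodal(id)/deletemodal(id); calling
# createmodal("c1") renders the expected-vs-actual table inside a Bootstrap modal.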
| [
"[email protected]"
]
| |
607219c000f7f31a1333d2b772480f3aad169545 | fea6e9d6b20b0c5f2a05a6f2433aae4176b2a00a | /server/applibs/account/tasks/fetch_status.py | 1c80b02e381a041e1e063576ae4ca0441bcb6c7a | []
| no_license | fanshuai/kubrick | fddf6c21bcd500223d9a05bd002e47eb1ecf8839 | b7ed6588e13d2916a4162d56509d2794742a1eb1 | refs/heads/main | 2023-03-24T12:21:44.562850 | 2021-03-19T15:11:40 | 2021-03-19T15:11:40 | 349,445,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | """
Synchronize the delivery status of SMS verification codes.
"""
import logging
from kubrick.celery import app
from server.corelib.dealer import deal_time
from server.corelib.notice.async_tasks import send_dd_msg__task
from server.constant.djalias import CQueueAlias
logger = logging.getLogger('kubrick.celery')
@app.task(queue=CQueueAlias.Timed.value)
def fetch_status_pnverify(now=None):
""" 短信验证码状态检查 """
from server.constant import mochoice as mc
from server.applibs.account.models import PNVerify
time_start, time_end = deal_time.round_floor_ten_mins(now=now)
pnv_qs = PNVerify.objects.filter(
status=mc.SMSStatus.Waiting,
created_at__gte=time_start,
created_at__lt=time_end,
)
done_count = 0
waiting_count = pnv_qs.count()
for pnv in pnv_qs:
pnv.sms_code_query()
done_count += 1 if pnv.is_status_final else 0
done_info = f'{time_start} ~ {time_end}: {done_count}/{waiting_count}'
logger.info(f'fetch_status_pnverify__done {done_info}')
if done_count != waiting_count:
        send_dd_msg__task(f'SMS verification code status check: {done_info}')
result = dict(
task='fetch_status_pnverify',
done=done_count,
waiting=waiting_count,
end_at=time_end.isoformat(),
start_at=time_start.isoformat(),
)
return result
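# Illustrative wiring sketch (an assumption, not taken from this repository): a task like
# this is usually driven by Celery beat, e.g.
#   app.conf.beat_schedule = {
#       'fetch-status-pnverify': {
#           'task': 'server.applibs.account.tasks.fetch_status.fetch_status_pnverify',
#           'schedule': 600,  # every ten minutes, matching deal_time.round_floor_ten_mins
#       },
#   }
# The dotted task path and schedule value are guesses based on this file's location and logic.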
| [
"[email protected]"
]
| |
a85b4046bfc7987cb03c53122f8ed3882aa82d61 | 2272759c7b09397ff462115fc68d1b8363f572db | /app/__init__.py | 5715f71dd4d096fce31c078b09bcf1a4e9ed4dcc | [
"MIT",
"CC-BY-4.0"
]
| permissive | Bubblbu/fhe-collector | e8f2f2b8d80a86c11c43d506244077b879ebedfc | b587a952eec318eab6cf430383fe83ca85277895 | refs/heads/master | 2020-03-30T17:49:08.065705 | 2019-09-17T22:54:25 | 2019-09-17T22:54:25 | 151,471,327 | 0 | 0 | MIT | 2018-10-03T19:48:38 | 2018-10-03T19:48:38 | null | UTF-8 | Python | false | false | 34,027 | py | """
.. module::
:platform: Linux
:synopsis: Web-app to collect facebook metrics.
.. moduleauthor:: Stefan Kasberger <[email protected]>
"""
__author__ = 'Stefan Kasberger'
__email__ = '[email protected]'
__copyright__ = 'Copyright (c) 2019 Stefan Kasberger'
__license__ = 'MIT License'
__version__ = '0.0.1'
__url__ = 'https://github.com/ScholCommLab/fhe-collector'
# from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime
from facebook import GraphAPI
from json import dumps, loads
import logging
from logging.handlers import RotatingFileHandler
import os
import pandas as pd
from psycopg2 import connect
import re
import requests
import urllib.parse
from tqdm import tqdm
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_debugtoolbar import DebugToolbarExtension
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
db = SQLAlchemy()
migrate = Migrate()
def validate_doi(doi):
"""Validate a DOI via regular expressions.
Parameters
----------
doi : string
A single DOI to be validated.
Returns
-------
bool
True, if DOI is valid, False if not.
"""
# validate doi
patterns = [
r"^10.\d{4,9}/[-._;()/:A-Z0-9]+$",
r"^10.1002/[^\s]+$",
r"^10.\d{4}/\d+-\d+X?(\d+)\d+<[\d\w]+:[\d\w]*>\d+.\d+.\w+;\d$",
r"^10.1021/\w\w\d+$",
r"^10.1207\/[\w\d]+\&\d+_\d+$"
]
is_valid = False
for pat in patterns:
if re.match(pat, doi, re.IGNORECASE):
is_valid = True
return is_valid
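# Illustrative behaviour (example values, not taken from project data):
#   validate_doi('10.1371/journal.pone.0115253')      -> True  (generic 10.NNNN/suffix pattern)
#   validate_doi('doi:10.1371/journal.pone.0115253')  -> False (a "doi:" prefix is not accepted)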
def init_from_csv(filename, batch_size):
"""Import DOI's from a csv file.
    Imports the DOI's from a csv file into the database: the raw data is stored in the
    ``Import`` table and the DOI's are added to the ``Doi`` table. The file must contain
    a `doi` column, and optionally `url`, `url_type` and `date`.
For test purposes, there is a file with 100 entries you can use.
Checks, if duplicate dois are in the file and removes them.
Parameters
----------
    filename : string
        Filepath for the csv file, relative from the root dir.
    batch_size : int
        Number of rows that are processed per database commit.
Returns
-------
    dict or bool
        Import metrics (``dois_added``, ``num_dois_added``, ``num_urls_added``),
        or ``False`` if the raw data could not be stored.
"""
from app.models import Doi
from app.models import Import
from app.models import Url
from app import db
num_dois_added = 0
num_urls_added = 0
dois_added = []
url_import_lst = []
url_list = []
filename = '{0}/{1}'.format(BASE_DIR, filename)
df = pd.read_csv(filename, encoding='utf8')
df = df.drop_duplicates(subset='doi')
df = df.fillna(False)
data = df.to_json(orient='records')
try:
db_imp = Import('<Init '+filename+'>', data)
db.session.add(db_imp)
db.session.commit()
except:
print('ERROR: Import() can not be stored in Database.')
return False
for i in range(0, len(df), batch_size):
for _, row in tqdm(df[i:i+batch_size].iterrows()):
dict_tmp = {}
is_valid = validate_doi(row['doi'])
if is_valid:
if row['doi'] and row['date']:
db_doi = None
try:
db_doi = Doi(
doi=row['doi'],
date_published=datetime.strptime(row['date'], '%Y-%m-%d'),
import_id=db_imp.id,
is_valid=True
)
db.session.add(db_doi)
num_dois_added += 1
dois_added.append(row['doi'])
except:
print('ERROR: Can not import Doi {0}.'.format(row['doi']))
if row['url'] and row['url_type'] and db_doi:
if row['url'] not in url_list:
url_list.append(row['url'])
dict_tmp['doi'] = db_doi.doi
dict_tmp['url'] = row['url']
dict_tmp['url_type'] = row['url_type']
url_import_lst.append(dict_tmp)
else:
print('WARNING: Entry {0} is not valid'.format(row['doi']))
else:
print('WARNING: DOI {} is not valid.'.format(row['doi']))
db.session.commit()
for i in range(0, len(url_import_lst), batch_size):
for d in url_import_lst[i:i+batch_size]:
try:
db_url = Url(
url=d['url'],
doi=d['doi'],
url_type=d['url_type']
)
db.session.add(db_url)
num_urls_added += 1
except:
print('ERROR: Can not import Url {0}.'.format(d['url']))
db.session.commit()
db.session.close()
print('{0} doi\'s added to database.'.format(num_dois_added))
print('{0} url\'s added to database.'.format(num_urls_added))
return {'dois_added': dois_added, 'num_dois_added': num_dois_added, 'num_urls_added': num_urls_added}
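# Illustrative layout of a CSV accepted by init_from_csv() (column names follow the
# docstring above; the values are made up):
#   doi,url,url_type,date
#   10.1371/journal.pone.0115253,https://doi.org/10.1371/journal.pone.0115253,publisher,2014-12-17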
def add_entries_to_database(data, import_id):
"""Store data to table Doi and Url.
Parameters
----------
data : list
List of dictionaries.
import_id : string
Id of ``Import()`` table, where the raw data was stored in.
Returns
-------
dict
        Import metrics as ``dict``. Keys: ``dois_added``, ``num_dois_added``
        and ``num_urls_added``.
"""
from app.models import Doi
from app.models import Url
num_dois_added = 0
num_urls_added = 0
dois_added = []
url_import_lst = []
url_list = []
for entry in tqdm(data):
dict_tmp = {}
is_valid = validate_doi(entry['doi'])
if is_valid:
db_doi = None
result_doi = Doi.query.filter_by(doi=entry['doi']).first()
if result_doi is None:
try:
db_doi = Doi(
doi=entry['doi'],
date_published=datetime.strptime(entry['date'], '%Y-%m-%d'),
import_id=import_id,
is_valid=True
)
db.session.add(db_doi)
num_dois_added += 1
dois_added.append(entry['doi'])
db.session.commit()
except:
print('ERROR: Can not import Doi {0}.'.format(entry['doi']))
if entry['url'] and entry['url_type'] and db_doi:
if entry['url'] not in url_list:
url_list.append(entry['url'])
dict_tmp['doi'] = db_doi.doi
dict_tmp['url'] = entry['url']
dict_tmp['url_type'] = entry['url_type']
url_import_lst.append(dict_tmp)
else:
print('WARNING: Entry {0} is not valid'.format(entry['doi']))
else:
print('WARNING: DOI {} is not valid.'.format(entry['doi']))
db.session.commit()
for d in url_import_lst:
try:
db_url = Url(
url=d['url'],
doi=d['doi'],
url_type=d['url_type']
)
db.session.add(db_url)
num_urls_added += 1
except:
print('ERROR: Can not import Url {0}.'.format(d['url']))
db.session.commit()
db.session.close()
print('{0} doi\'s added to database.'.format(num_dois_added))
print('{0} url\'s added to database.'.format(num_urls_added))
return {'dois_added': dois_added, 'num_dois_added': num_dois_added, 'num_urls_added': num_urls_added}
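    # NOTE: everything below this return statement is unreachable; it appears to be an
    # earlier implementation of this function that was left in place after the rewrite above.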
for entry in tqdm(data):
is_valid = validate_doi(entry['doi'])
# TODO: what if not valid? user does not get it back in the api response.
if is_valid:
if entry['doi'] and entry['date']:
doi = entry['doi']
result_doi = Doi.query.filter_by(doi=doi).first()
if result_doi is None:
try:
db_doi = Doi(
doi=doi,
date_published=datetime.strptime(entry['date'], '%Y-%m-%d'),
import_id=import_id,
is_valid=True
)
db.session.add(db_doi)
db.session.commit()
num_dois_added += 1
dois_added.append(doi)
except:
print('ERROR: Can not import Doi {0}.'.format(doi))
else:
doi = result_doi.doi
else:
print('WARNING: Entry {0} is not valid'.format(doi))
# store url
if entry['url'] and entry['url_type']:
url = entry['url']
result_url = Url.query.filter_by(url=url).first()
if result_url is None:
try:
db_url = Url(
url=url,
doi=doi,
url_type=entry['url_type']
)
db.session.add(db_url)
db.session.commit()
num_urls_added += 1
except:
print('ERROR: Can not import Url {0}.'.format(url))
else:
print('WARNING: DOI {} is not valid.'.format(entry['doi']))
print('{0} doi\'s added to database.'.format(num_dois_added))
print('{0} url\'s added to database.'.format(num_urls_added))
return {'dois_added': dois_added, 'num_dois_added': num_dois_added, 'num_urls_added': num_urls_added}
def import_dois_from_api(data):
"""Import data coming from the API endpoint.
Parameters
----------
data : type
Description of parameter `data`.
Returns
-------
string
Response text for API request.
"""
from app.models import Import
try:
imp = Import('<API>', dumps(data))
db.session.add(imp)
db.session.commit()
response = add_entries_to_database(data, imp.id)
return response
except:
response = 'ERROR: Data import from API not working.'
print(response)
return response
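# Illustrative payload for import_dois_from_api() -- the keys are the ones read by
# add_entries_to_database(); the values are made up:
#   [{"doi": "10.1371/journal.pone.0115253",
#     "url": "https://doi.org/10.1371/journal.pone.0115253",
#     "url_type": "publisher",
#     "date": "2014-12-17"}]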
def create_doi_new_urls(batch_size):
"""Create URL's from the identifier.
Creates the DOI URL's as part of the pre-processing.
"""
from app.models import Doi
from app.models import Url
import app
num_urls_added = 0
db_urls = []
urls_added = []
# get all URL's in the database
query = db.session.query(Url.url)
for row in query:
db_urls.append(row.url)
# get doi, url_doi_new=False and url
result_join = db.session.query(Doi).join(Url).filter(Doi.doi == Url.doi).filter(Doi.url_doi_new == False).all()
for i in range(0, len(result_join), batch_size):
for d in result_join[i:i+batch_size]:
doi_url_encoded = urllib.parse.quote(d.doi)
url = 'https://doi.org/{0}'.format(doi_url_encoded)
if url not in db_urls and url not in urls_added:
try:
db_url = Url(
url=url,
doi=d.doi,
url_type='doi_new'
)
d.url_doi_new = True
db.session.add(db_url)
num_urls_added += 1
urls_added.append(url)
except:
print('WARNING: Url {0} can not be created.'.format(url))
db.session.commit()
db.session.close()
print('{0} new doi url\'s added to database.'.format(num_urls_added))
def create_doi_old_urls(batch_size):
"""Create URL's from the identifier.
Creates the DOI URL's as part of the pre-processing.
"""
from app.models import Doi
from app.models import Url
num_urls_added = 0
db_urls = []
urls_added = []
# get all URL's in the database
query = db.session.query(Url.url)
for row in query:
db_urls.append(row.url)
# get doi, url_doi_old=False and url
result_join = db.session.query(Doi).join(Url).filter(Doi.doi == Url.doi).filter(Doi.url_doi_old == False).all()
for i in range(0, len(result_join), batch_size):
for d in result_join[i:i+batch_size]:
doi_url_encoded = urllib.parse.quote(d.doi)
url = 'http://dx.doi.org/{0}'.format(doi_url_encoded)
if url not in db_urls and url not in urls_added:
try:
db_url = Url(
url=url,
doi=d.doi,
url_type='doi_old'
)
d.url_doi_old = True
db.session.add(db_url)
num_urls_added += 1
urls_added.append(url)
except:
print('WARNING: Url {0} can not be created.'.format(url))
db.session.commit()
db.session.close()
print('{0} old doi url\'s added to database.'.format(num_urls_added))
def create_doi_lp_urls():
"""Create URL's from the identifier.
Creates the DOI URL's as part of the pre-processing.
"""
from app.models import APIRequest
from app.models import Doi
from app.models import Url
num_urls_added = 0
db_urls = []
urls_added = []
# get all URL's in the database
query = db.session.query(Url.url)
for row in query:
db_urls.append(row.url)
# create doi landing page url
result_join = db.session.query(Doi).join(Url).filter(Doi.doi == Url.doi).filter(Doi.url_doi_lp == False).all()
for d in tqdm(result_join):
doi_url_encoded = urllib.parse.quote(d.doi)
url = 'https://doi.org/{0}'.format(doi_url_encoded)
resp = request_doi_landingpage_api(url)
resp_url = resp.url
try:
db_api = APIRequest(
doi=d.doi,
request_url=url,
request_type='doi_landingpage',
response_content=resp.content,
response_status=resp.status_code
)
db.session.add(db_api)
except:
print('WARNING: APIRequest can not be created.')
if resp_url not in db_urls and resp_url not in urls_added:
db_url = Url(
url=resp_url,
doi=d.doi,
url_type='doi_landingpage'
)
d.url_doi_lp = True
db.session.add(db_url)
num_urls_added += 1
urls_added.append(resp_url)
db.session.commit()
db.session.close()
print('{0} doi new landing page doi url\'s added to database.'.format(num_urls_added))
def request_doi_landingpage_api(url):
return requests.get(url, allow_redirects=True)
def create_ncbi_urls(ncbi_tool, ncbi_email):
"""Create NCBI URL's from the identifier.
https://www.ncbi.nlm.nih.gov/pmc/tools/id-converter-api/
Parameters
----------
ncbi_tool : string
Name of tool, which want to connect to the NCBI API.
email : string
Email related to the app, used as credential for the request.
"""
from app.models import APIRequest
from app.models import Doi
from app.models import Url
num_urls_pm_added = 0
num_urls_pmc_added = 0
db_urls = []
urls_added = []
# get all URL's in the database
query = db.session.query(Url.url)
for row in query:
db_urls.append(row.url)
    result_join = db.session.query(Doi).join(Url).filter(Doi.doi == Url.doi).filter((Doi.url_pm == False) | (Doi.url_pmc == False)).all()
for d in tqdm(result_join):
# TODO: allows up to 200 ids sent at the same time
# send request to NCBI API
doi_url_encoded = urllib.parse.quote(d.doi)
url = 'https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/?ids={0}'.format(doi_url_encoded)
        resp = request_ncbi_api(url, ncbi_tool, ncbi_email, d.doi)
        resp_data = resp.json()
db_ncbi = APIRequest(
# doi=d.doi,
doi=d.doi,
request_url=url,
request_type='ncbi',
response_content=dumps(resp_data),
response_status=resp.status_code
)
db.session.add(db_ncbi)
if 'records' in resp_data:
# create PMC url
if 'pmcid' in resp_data['records']:
url_pmc = 'https://ncbi.nlm.nih.gov/pmc/articles/PMC{0}/'.format(
resp_data['records']['pmcid'])
                if url_pmc not in db_urls and url_pmc not in urls_added:
                    db_url_pmc = Url(
                        url=url_pmc,
                        doi=d.doi,
                        url_type='pmc'
                    )
d.url_pmc = True
db.session.add(db_url_pmc)
num_urls_pmc_added += 1
urls_added.append(url_pmc)
# create PM url
if 'pmid' in resp_data['records']:
url_pm = 'https://www.ncbi.nlm.nih.gov/pubmed/{0}'.format(
resp_data['records']['pmid'])
if Url.query.filter_by(url=url_pm).first() is None:
db_url_pm = Url(
url=url_pm,
doi=d.doi,
url_type='pm'
)
d.url_pm = True
db.session.add(db_url_pm)
num_urls_pm_added += 1
                    urls_added.append(url_pm)
db.session.commit()
db.session.close()
print('{0} PM url\'s added to database.'.format(num_urls_pm_added))
print('{0} PMC url\'s added to database.'.format(num_urls_pmc_added))
def request_ncbi_api(url, ncbi_tool, ncbi_email, doi):
resp = requests.get(url, params={
'tool': ncbi_tool,
'email': ncbi_email,
'idtype': 'doi',
'versions': 'no',
'format': 'json'
})
    # return the Response object so the caller can use both .json() and .status_code
    return resp
def create_unpaywall_urls(email):
"""Create Unpaywall URL's from the identifier.
https://unpaywall.org/products/api
Parameters
----------
email : string
Email related to the app, used as credential for the request.
"""
from app.models import APIRequest
from app.models import Doi
from app.models import Url
num_urls_unpaywall_added = 0
db_urls = []
urls_added = []
# get all URL's in the database
query = db.session.query(Url.url)
for row in query:
db_urls.append(row.url)
result_join = db.session.query(Doi).join(Url).filter(Doi.doi == Url.doi).filter(Doi.url_unpaywall == False).all()
for d in tqdm(result_join):
# send request to Unpaywall API
url_dict = {}
doi_url_encoded = urllib.parse.quote(d.doi)
url = 'https://api.unpaywall.org/v2/{0}?email={1}'.format(doi_url_encoded, email)
        resp = request_unpaywall_api(url)
        resp_data = resp.json()
db_api = APIRequest(
doi=d.doi,
request_url=url,
request_type='unpaywall',
response_content=dumps(resp_data),
            response_status=resp.status_code
)
d.url_unpaywall = True
db.session.add(db_api)
db.session.commit()
# check if response includes needed data
if 'doi_url' in resp_data:
url_dict['unpaywall_doi_url'] = resp_data['doi_url']
if 'oa_locations' in resp_data:
for loc in resp_data['oa_locations']:
if 'url_for_pdf' in loc:
if loc['url_for_pdf']:
url_dict['unpaywall_url_for_pdf'] = loc['url_for_pdf']
if 'url' in loc:
if loc['url']:
url_dict['unpaywall_url'] = loc['url']
if 'url_for_landing_page' in loc:
if loc['url_for_landing_page']:
url_dict['unpaywall_url_for_landing_page'] = loc['url_for_landing_page']
# store URL's in database
for url_type, url in url_dict.items():
if url not in db_urls and url not in urls_added:
db_url = Url(
url=url,
doi=d.doi,
url_type=url_type
)
d.url_unpaywall = True
db.session.add(db_url)
num_urls_unpaywall_added += 1
urls_added.append(url)
db.session.commit()
db.session.close()
print('{0} Unpaywall url\'s added to database.'.format(num_urls_unpaywall_added))
def request_unpaywall_api(url):
resp = requests.get(url)
    # return the Response object so the caller can use both .json() and .status_code
    return resp
def fb_requests(app_id, app_secret, batch_size):
"""Get app access token.
Example Response:
{'id': 'http://dx.doi.org/10.22230/src.2010v1n2a24',
'engagement': { 'share_count': 0, 'comment_plugin_count': 0,
'reaction_count': 0, 'comment_count': 0}}
"""
from app.models import FBRequest
from app.models import Url
payload = {'grant_type': 'client_credentials',
'client_id': app_id,
'client_secret': app_secret}
try:
response = requests.post(
'https://graph.facebook.com/oauth/access_token?',
params=payload)
except requests.exceptions.RequestException:
raise Exception()
token = loads(response.text)
fb_graph = GraphAPI(token['access_token'], version="2.10")
fb_request_added = 0
result_url = Url.query.all()
for i in range(0, len(result_url), batch_size):
batch = result_url[i:i + batch_size]
url_list = []
for row in batch:
url_list.append(row.url)
urls_response = fb_graph.get_objects(ids=url_list,
fields="engagement,og_object")
for key, value in urls_response.items():
if urls_response:
db_fb_request = FBRequest(
url=key,
response=value
)
db.session.add(db_fb_request)
fb_request_added += 1
db.session.commit()
db.session.close()
print('{0} Facebook openGraph request\'s added to database.'.format(fb_request_added))
def delete_dois():
"""Delete all doi entries."""
from app.models import Doi
try:
dois_deleted = db.session.query(Doi).delete()
db.session.commit()
print(dois_deleted, 'doi\'s deleted from database.')
except:
db.session.rollback()
print('ERROR: Doi\'s can not be deleted from database.')
def delete_urls():
"""Delete all url entries."""
from app.models import Url
try:
urls_deleted = db.session.query(Url).delete()
db.session.commit()
print(urls_deleted, 'url\'s deleted from database.')
except:
db.session.rollback()
print('ERROR: Url\'s can not be deleted from database.')
def delete_apirequests():
"""Delete all api requests."""
from app.models import APIRequest
try:
apirequests_deleted = db.session.query(APIRequest).delete()
db.session.commit()
print(apirequests_deleted, 'APIRequests\'s deleted from database.')
except:
db.session.rollback()
print('ERROR: API requests\'s can not be deleted from database.')
def delete_fbrequests():
"""Delete all facebook requests."""
from app.models import FBRequest
try:
fbrequests_deleted = db.session.query(FBRequest).delete()
db.session.commit()
print(fbrequests_deleted, 'FBRequests\'s deleted from database.')
except:
db.session.rollback()
print('ERROR: Facebook requests\'s can not be deleted from database.')
def export_tables_to_csv(table_names, db_uri):
"""Short summary.
Parameters
----------
table_names : list
Description of parameter `table_names`.
db_uri : string
Description of parameter `db_uri`.
"""
con = connect(db_uri)
cur = con.cursor()
filename_list = [BASE_DIR + '/app/static/export/'+datetime.today().strftime('%Y-%m-%d')+'_'+table+'.csv' for table in table_names]
for idx, filename in enumerate(filename_list):
sql = "COPY "+table_names[idx]+" TO STDOUT DELIMITER ',' CSV HEADER;"
cur.copy_expert(sql, open(filename, "w"))
def import_csv(table_names, delete_tables):
"""Import data coming from CSV file."""
from app import import_csv_recreate
from app import import_csv_append
if delete_tables:
import_csv_recreate(table_names)
else:
import_csv_append(table_names)
def import_csv_recreate(table_names):
"""Import data coming from CSV file.
Delete all data in advance and do fresh import.
"""
from app import delete_data
from app.models import Import
from app.models import Doi
from app.models import Url
from app.models import APIRequest
from app.models import FBRequest
table2model = {
'doi': Doi,
'url': Url,
'api_request': APIRequest,
'fb_request': FBRequest
}
delete_data()
filename_list = [BASE_DIR + '/app/static/import/'+table+'.csv' for table in table_names]
for idx, filename in enumerate(filename_list):
model = table2model[table_names[idx]]
df = pd.read_csv(filename)
data_str = df.to_json(orient='records')
db_imp = Import('<Import '+filename+'>', data_str)
db.session.add(db_imp)
db.session.commit()
for row in df.to_dict(orient="records"):
if table_names[idx] == 'doi':
model = Doi(**row)
elif table_names[idx] == 'url':
model = Url(**row)
elif table_names[idx] == 'api_request':
model = APIRequest(**row)
elif table_names[idx] == 'fb_request':
model = FBRequest(**row)
db.session.add(model)
db.session.commit()
def import_csv_append(table_names):
"""Import data coming from CSV file.
Insert all data in advance and do fresh import.
"""
from app.models import Import
from app.models import Doi
from app.models import Url
from app.models import APIRequest
from app.models import FBRequest
for table_name in table_names:
filename = BASE_DIR + '/app/static/import/'+table_name+'.csv'
df = pd.read_csv(filename, encoding='utf8')
data_str = df.to_json(orient='records')
data = df.to_dict(orient='records')
db_imp = Import('<Import '+filename+'>', data_str)
db.session.add(db_imp)
db.session.commit()
if table_name == 'doi':
print('Import Doi table:')
dois_added = 0
for entry in tqdm(data):
result_doi = Doi.query.filter_by(doi=entry['doi']).first()
if result_doi is None:
if entry['is_valid'] == 't':
is_valid = True
elif entry['is_valid'] == 'f':
is_valid = False
if entry['url_doi_lp'] == 't':
url_doi_lp = True
elif entry['url_doi_lp'] == 'f':
url_doi_lp = False
if entry['url_doi_new'] == 't':
url_doi_new = True
elif entry['url_doi_new'] == 'f':
url_doi_new = False
if entry['url_doi_old'] == 't':
url_doi_old = True
elif entry['url_doi_old'] == 'f':
url_doi_old = False
if entry['url_pm'] == 't':
url_pm = True
elif entry['url_pm'] == 'f':
url_pm = False
if entry['url_pmc'] == 't':
url_pmc = True
elif entry['url_pmc'] == 'f':
url_pmc = False
if entry['url_unpaywall'] == 't':
url_unpaywall = True
elif entry['url_unpaywall'] == 'f':
url_unpaywall = False
db_doi = Doi(
doi=entry['doi'],
import_id=db_imp.id,
is_valid=is_valid,
pm_id=entry['pm_id'],
pmc_id=entry['pmc_id'],
date_published=datetime.strptime(entry['date_published'], '%Y-%m-%d %H:%M:%S'),
url_doi_lp=url_doi_lp,
url_doi_new=url_doi_new,
url_doi_old=url_doi_old,
url_pm=url_pm,
url_pmc=url_pmc,
url_unpaywall=url_unpaywall
)
db.session.add(db_doi)
db.session.commit()
dois_added += 1
print('{0} doi\'s added to database.'.format(dois_added))
elif table_name == 'url':
print('Import Url table:')
urls_added = 0
for entry in tqdm(data):
result_url = Url.query.filter_by(url=entry['url']).first()
if result_url is None:
db_url = Url(
url=entry['url'],
doi=entry['doi'],
url_type=entry['url_type'],
date_added=datetime.strptime(entry['date_added'], '%Y-%m-%d %H:%M:%S.%f')
)
db.session.add(db_url)
db.session.commit()
urls_added += 1
print('{0} url\'s added to database.'.format(urls_added))
elif table_name == 'api_request':
print('Import APIRequests table:')
apirequests_added = 0
for entry in tqdm(data):
db_apirequest = APIRequest(
doi=entry['doi'],
request_url=entry['request_url'],
request_type=entry['request_type'],
response_content=entry['response_content'],
response_status=entry['response_status']
)
db.session.add(db_apirequest)
db.session.commit()
apirequests_added += 1
print('{0} apirequest\'s added to database.'.format(apirequests_added))
elif table_name == 'fb_request':
print('Import FBRequests table:')
fbrequests_added = 0
for entry in tqdm(data):
db_fbrequest = FBRequest(
url_url=entry['url_url'],
response=entry['response'],
reactions=entry['reactions'],
shares=entry['shares'],
comments=entry['comments'],
plugin_comments=entry['plugin_comments'],
timestamp=datetime.strptime(entry['timestamp'], '%Y-%m-%d %H:%M:%S.%f')
)
db.session.add(db_fbrequest)
db.session.commit()
fbrequests_added += 1
print('{0} fbrequest\'s added to database.'.format(fbrequests_added))
def create_app():
"""Create application and load settings."""
app = Flask(__name__)
ENVIRONMENT = os.getenv('ENV', default='development')
# TESTING = os.getenv('TESTING', default=False)
print('* Updating App Mode to: ' + ENVIRONMENT)
travis = os.getenv('TRAVIS', default=False)
if not travis:
print('* Loading User Settings.')
app.config.from_pyfile(BASE_DIR+'/settings_user.py', silent=True)
if ENVIRONMENT == 'development':
print('* Loading Development Settings.')
app.config.from_pyfile(BASE_DIR+'/settings_development.py', silent=True)
app.config.from_object('settings_default.Development')
if not travis:
DebugToolbarExtension(app)
elif ENVIRONMENT == 'production':
print('* Loading Production Settings.')
# order of settings loading: 1. settings file, 2. environment variable DATABASE_URL, 3. environment variable SQLALCHEMY_DATABASE_URI
if not travis:
app.config.from_pyfile(BASE_DIR+'/settings_production.py', silent=True)
app.config.from_object('settings_default.Production')
elif ENVIRONMENT == 'testing':
print('* Loading Test Settings.')
app.config['TESTING'] = True
app.config.from_object('settings_default.Testing')
if not travis:
print('* Database: ' + app.config['SQLALCHEMY_DATABASE_URI'])
db.init_app(app)
migrate.init_app(app, db)
from app.errors import bp as errors_bp
app.register_blueprint(errors_bp)
from app.main import bp as main_bp
app.register_blueprint(main_bp)
# scheduler = BackgroundScheduler()
# rate_limit = app.config['FB_HOURLY_RATELIMIT']
# rate_intervall = 3600 / rate_limit
# scheduler.add_job(, trigger='interval', seconds=rate_intervall)
# scheduler.start()
if not app.debug and not app.testing:
# Logging (only production)
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/fhe.log', maxBytes=10240, backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Facebook Hidden Engagement')
return app
from app import models
| [
"[email protected]"
]
| |
10db09bd205a4767ad04c2ad9a7ae71e296af40f | 296132d2c5d95440b3ce5f4401078a6d0f736f5a | /homeassistant/components/xiaomi_ble/sensor.py | 831b5d0910be035820e0172f6706c2b06edb2f0c | [
"Apache-2.0"
]
| permissive | mezz64/home-assistant | 5349a242fbfa182159e784deec580d2800173a3b | 997d4fbe5308b01d14ceabcfe089c2bc511473dd | refs/heads/dev | 2023-03-16T22:31:52.499528 | 2022-12-08T02:55:25 | 2022-12-08T02:55:25 | 68,411,158 | 2 | 1 | Apache-2.0 | 2023-03-10T06:56:54 | 2016-09-16T20:04:27 | Python | UTF-8 | Python | false | false | 6,812 | py | """Support for xiaomi ble sensors."""
from __future__ import annotations
from typing import Optional, Union
from xiaomi_ble import DeviceClass, SensorUpdate, Units
from homeassistant import config_entries
from homeassistant.components.bluetooth.passive_update_processor import (
PassiveBluetoothDataProcessor,
PassiveBluetoothDataUpdate,
PassiveBluetoothProcessorCoordinator,
PassiveBluetoothProcessorEntity,
)
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.const import (
CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
CONDUCTIVITY,
ELECTRIC_POTENTIAL_VOLT,
LIGHT_LUX,
PERCENTAGE,
PRESSURE_MBAR,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.sensor import sensor_device_info_to_hass_device_info
from .const import DOMAIN
from .device import device_key_to_bluetooth_entity_key
SENSOR_DESCRIPTIONS = {
(DeviceClass.BATTERY, Units.PERCENTAGE): SensorEntityDescription(
key=f"{DeviceClass.BATTERY}_{Units.PERCENTAGE}",
device_class=SensorDeviceClass.BATTERY,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
(DeviceClass.CONDUCTIVITY, Units.CONDUCTIVITY): SensorEntityDescription(
key=str(Units.CONDUCTIVITY),
device_class=None,
native_unit_of_measurement=CONDUCTIVITY,
state_class=SensorStateClass.MEASUREMENT,
),
(
DeviceClass.FORMALDEHYDE,
Units.CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
): SensorEntityDescription(
key=f"{DeviceClass.FORMALDEHYDE}_{Units.CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER}",
native_unit_of_measurement=CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.HUMIDITY, Units.PERCENTAGE): SensorEntityDescription(
key=f"{DeviceClass.HUMIDITY}_{Units.PERCENTAGE}",
device_class=SensorDeviceClass.HUMIDITY,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.ILLUMINANCE, Units.LIGHT_LUX): SensorEntityDescription(
key=f"{DeviceClass.ILLUMINANCE}_{Units.LIGHT_LUX}",
device_class=SensorDeviceClass.ILLUMINANCE,
native_unit_of_measurement=LIGHT_LUX,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.MOISTURE, Units.PERCENTAGE): SensorEntityDescription(
key=f"{DeviceClass.MOISTURE}_{Units.PERCENTAGE}",
device_class=SensorDeviceClass.MOISTURE,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.PRESSURE, Units.PRESSURE_MBAR): SensorEntityDescription(
key=f"{DeviceClass.PRESSURE}_{Units.PRESSURE_MBAR}",
device_class=SensorDeviceClass.PRESSURE,
native_unit_of_measurement=PRESSURE_MBAR,
state_class=SensorStateClass.MEASUREMENT,
),
(
DeviceClass.SIGNAL_STRENGTH,
Units.SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
): SensorEntityDescription(
key=f"{DeviceClass.SIGNAL_STRENGTH}_{Units.SIGNAL_STRENGTH_DECIBELS_MILLIWATT}",
device_class=SensorDeviceClass.SIGNAL_STRENGTH,
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
state_class=SensorStateClass.MEASUREMENT,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
),
(DeviceClass.TEMPERATURE, Units.TEMP_CELSIUS): SensorEntityDescription(
key=f"{DeviceClass.TEMPERATURE}_{Units.TEMP_CELSIUS}",
device_class=SensorDeviceClass.TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
state_class=SensorStateClass.MEASUREMENT,
),
(DeviceClass.VOLTAGE, Units.ELECTRIC_POTENTIAL_VOLT): SensorEntityDescription(
key=f"{DeviceClass.VOLTAGE}_{Units.ELECTRIC_POTENTIAL_VOLT}",
device_class=SensorDeviceClass.VOLTAGE,
native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
# Used for e.g. consumable sensor on WX08ZM
(None, Units.PERCENTAGE): SensorEntityDescription(
key=str(Units.PERCENTAGE),
device_class=None,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
}
def sensor_update_to_bluetooth_data_update(
sensor_update: SensorUpdate,
) -> PassiveBluetoothDataUpdate:
"""Convert a sensor update to a bluetooth data update."""
return PassiveBluetoothDataUpdate(
devices={
device_id: sensor_device_info_to_hass_device_info(device_info)
for device_id, device_info in sensor_update.devices.items()
},
entity_descriptions={
device_key_to_bluetooth_entity_key(device_key): SENSOR_DESCRIPTIONS[
(description.device_class, description.native_unit_of_measurement)
]
for device_key, description in sensor_update.entity_descriptions.items()
if description.native_unit_of_measurement
},
entity_data={
device_key_to_bluetooth_entity_key(device_key): sensor_values.native_value
for device_key, sensor_values in sensor_update.entity_values.items()
},
entity_names={
device_key_to_bluetooth_entity_key(device_key): sensor_values.name
for device_key, sensor_values in sensor_update.entity_values.items()
},
)
async def async_setup_entry(
hass: HomeAssistant,
entry: config_entries.ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Xiaomi BLE sensors."""
coordinator: PassiveBluetoothProcessorCoordinator = hass.data[DOMAIN][
entry.entry_id
]
processor = PassiveBluetoothDataProcessor(sensor_update_to_bluetooth_data_update)
entry.async_on_unload(
processor.async_add_entities_listener(
XiaomiBluetoothSensorEntity, async_add_entities
)
)
entry.async_on_unload(coordinator.async_register_processor(processor))
class XiaomiBluetoothSensorEntity(
PassiveBluetoothProcessorEntity[
PassiveBluetoothDataProcessor[Optional[Union[float, int]]]
],
SensorEntity,
):
"""Representation of a xiaomi ble sensor."""
@property
def native_value(self) -> int | float | None:
"""Return the native value."""
return self.processor.entity_data.get(self.entity_key)
| [
"[email protected]"
]
| |
33db5512963f5b8c6d5910b476d375ebec537462 | 414393a5048e5212223051d6a5541ecb873bcc53 | /imagenet_VGG16/main_PFSUM.py | 00bece7144bfcbbe48d1335b150d8de00d3d18ec | []
| no_license | byh1321/CIFAR100_Distorted_Channel_Selective | 5a0fc1107ab9d60ce12504a8e474144762eda8df | 897f2dea4e645329dfc3bf3df6b147c783bfa83f | refs/heads/master | 2020-03-21T02:31:24.024771 | 2019-08-12T05:59:53 | 2019-08-12T05:59:53 | 138,002,631 | 0 | 0 | null | 2019-08-02T02:26:49 | 2018-06-20T08:26:51 | Python | UTF-8 | Python | false | false | 21,685 | py | from __future__ import print_function
import time
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.models as models
import argparse
import torch.optim as optim
import pytorch_fft.fft as fft
import utils
from utils import progress_bar
import os
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--se', default=0, type=int, help='start epoch')
parser.add_argument('--ne', default=0, type=int, help='number of epoch')
parser.add_argument('--bs', default=128, type=int, help='batch size')
parser.add_argument('--mode', default=1, type=int, help='train or inference') #mode=1 is train, mode=0 is inference
parser.add_argument('--fixed', default=0, type=int, help='quantization') #fixed=1 applies quantization, fixed=0 uses full precision
parser.add_argument('--gau', type=float, default=0, metavar='N',help='gaussian noise standard deviation')
parser.add_argument('--blur', type=float, default=0, metavar='N',help='blur noise standard deviation')
parser.add_argument('--samplesize', default=0, type=int, help='set sample size')
parser.add_argument('--outputfile', default='garbage.txt', help='output file name', metavar="FILE")
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
best_acc = 0 # best test accuracy
traindir = os.path.join('/usr/share/ImageNet/train')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(traindir,transforms.Compose([transforms.RandomSizedCrop(224),transforms.RandomHorizontalFlip(),transforms.ToTensor(),normalize,]))
train_sub_dataset, dump = torch.utils.data.random_split(train_dataset,[args.samplesize,(len(train_dataset)-args.samplesize)])
train_loader = torch.utils.data.DataLoader(train_sub_dataset, batch_size=1, shuffle=True, num_workers=8, pin_memory=True)
#train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=True,num_workers=8, pin_memory=True)
valdir = os.path.join('/usr/share/ImageNet/val')
val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir,transforms.Compose([transforms.Scale(256),transforms.CenterCrop(224),transforms.ToTensor(),normalize])),batch_size=128, shuffle=False,num_workers=1, pin_memory=True)
class VGG16(nn.Module):
def __init__(self):
super(VGG16,self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv2 = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv3 = nn.Sequential(
nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv4 = nn.Sequential(
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv5 = nn.Sequential(
nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv6 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv7 = nn.Sequential(
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv8 = nn.Sequential(
nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv9 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv10 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.conv11 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv12 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.conv13 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.linear1 = nn.Sequential(
nn.Linear(25088, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
)
self.linear2 = nn.Sequential(
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
)
self.linear3 = nn.Sequential(
nn.Linear(4096, 1000),
)
def forward(self,x):
if (args.gau==0)&(args.blur==0):
#no noise
pass
elif (args.blur == 0)&(args.gau != 0):
#gaussian noise add
gau_kernel = torch.randn(x.size())*args.gau
x = Variable(gau_kernel.cuda()) + x
elif (args.gau == 0)&(args.blur != 0):
#blur noise add
blur_kernel_partial = torch.FloatTensor(utils.genblurkernel(args.blur))
blur_kernel_partial = torch.matmul(blur_kernel_partial.unsqueeze(1),torch.transpose(blur_kernel_partial.unsqueeze(1),0,1))
kernel_size = blur_kernel_partial.size()[0]
zeros = torch.zeros(kernel_size,kernel_size)
blur_kernel = torch.cat((blur_kernel_partial,zeros,zeros,
zeros,blur_kernel_partial,zeros,
zeros,zeros,blur_kernel_partial),0)
blur_kernel = blur_kernel.view(3,3,kernel_size,kernel_size)
blur_padding = int((blur_kernel_partial.size()[0]-1)/2)
#x = torch.nn.functional.conv2d(x, weight=blur_kernel.cuda(), padding=blur_padding)
x = torch.nn.functional.conv2d(x, weight=Variable(blur_kernel.cuda()), padding=blur_padding)
elif (args.gau != 0) & (args.blur != 0):
#both gaussian and blur noise added
blur_kernel_partial = torch.FloatTensor(utils.genblurkernel(args.blur))
blur_kernel_partial = torch.matmul(blur_kernel_partial.unsqueeze(1),torch.transpose(blur_kernel_partial.unsqueeze(1),0,1))
kernel_size = blur_kernel_partial.size()[0]
zeros = torch.zeros(kernel_size,kernel_size)
blur_kernel = torch.cat((blur_kernel_partial,zeros,zeros,
zeros,blur_kernel_partial,zeros,
zeros,zeros,blur_kernel_partial),0)
blur_kernel = blur_kernel.view(3,3,kernel_size,kernel_size)
blur_padding = int((blur_kernel_partial.size()[0]-1)/2)
x = torch.nn.functional.conv2d(x, weight=Variable(blur_kernel.cuda()), padding=blur_padding)
gau_kernel = torch.randn(x.size())*args.gau
x = Variable(gau_kernel.cuda()) + x
else:
print("Something is wrong in noise adding part")
exit()
tmp = Variable(torch.zeros(1,3,224,224).cuda())
f = fft.Fft2d()
fft_rout, fft_iout = f(x, tmp)
mag = torch.sqrt(torch.mul(fft_rout,fft_rout) + torch.mul(fft_iout,fft_iout))
tmp = torch.zeros(1,1,224,224).cuda()
tmp = torch.add(torch.add(mag[:,0,:,:],mag[:,1,:,:]),mag[:,2,:,:])
tmp = torch.abs(tmp)
PFSUM = 0
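		# The branches below keep only indices with 111 <= i+j <= 335 and |i-j| <= 112, i.e. the
		# central diamond of the unshifted 2-D spectrum; with no fftshift the low frequencies sit
		# at the corners, so PFSUM accumulates the high-frequency magnitude of the first (and,
		# with batch size 1, only) image in the batch.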
for i in range(0,224):
for j in range(0,224):
if (i+j) < 111:
print_value = 0
elif (i-j) > 112:
print_value = 0
elif (j-i) > 112:
print_value = 0
elif (i+j) > 335:
print_value = 0
else:
PFSUM = PFSUM + tmp[0,i,j]
f = open(args.outputfile,'a+')
print(PFSUM.item(),file=f)
f.close()
'''
f = open(args.outputfile,'a+')
for i in range(0,224):
for j in range(0,224):
print(tmp[0,i,j].item()/3,file = f)
f.close()
exit()
'''
"""
if args.fixed:
x = quant(x)
x = roundmax(x)
out = self.conv1(x)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv2(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv3(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv4(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv5(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv6(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv7(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv8(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv9(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv10(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv11(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv12(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.conv13(out)
out = out.view(out.size(0), -1)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.linear1(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.linear2(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
out = self.linear3(out)
if args.fixed:
out = quant(out)
out = roundmax(out)
"""
out = torch.zeros(1000)
return out
if args.mode == 0:
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt_20180813.t0')
net = checkpoint['net']
elif args.mode == 1:
if args.resume:
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt_20180813.t0')
best_acc = checkpoint['acc']
net = checkpoint['net']
else:
print('==> Building model..')
net = VGG16()
elif args.mode == 2:
checkpoint = torch.load('./checkpoint/ckpt_20180813.t0')
net = checkpoint['net']
if args.resume:
print('==> Resuming from checkpoint..')
best_acc = checkpoint['acc']
else:
best_acc = 0
if use_cuda:
#print(torch.cuda.device_count())
net.cuda()
net = torch.nn.DataParallel(net, device_ids=range(0,1))
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4)
start_epoch = args.se
num_epoch = args.ne
###################################################################################
# Copied this part from https://github.com/pytorch/examples/blob/master/imagenet/main.py
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
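# Worked example (illustrative values): with output = torch.tensor([[0.1, 0.7, 0.2]]) and
# target = torch.tensor([2]), accuracy(output, target, topk=(1, 2)) returns
# [tensor([0.]), tensor([100.])] -- class 2 is not the top-1 prediction but is within the top-2.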
######################################################################################
def paramsget():
params = net.conv1[0].weight.view(-1,)
params = torch.cat((params,net.conv2[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv3[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv4[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv5[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv6[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv7[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv8[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv9[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv10[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv11[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv12[0].weight.view(-1,)),0)
params = torch.cat((params,net.conv13[0].weight.view(-1,)),0)
	# the fully connected layers of the VGG16 class above are named linear1..linear3 (nn.Linear at index 0)
	params = torch.cat((params,net.linear1[0].weight.view(-1,)),0)
	params = torch.cat((params,net.linear2[0].weight.view(-1,)),0)
	params = torch.cat((params,net.linear3[0].weight.view(-1,)),0)
#net = checkpoint['net']
return params
def findThreshold(params):
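	# NOTE: this helper reads args.pr (a target pruning ratio in percent) which is not among
	# the argparse options defined above -- it is assumed to be provided by the pruning
	# variant of this script.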
thres=0
while 1:
tmp = (torch.abs(params.data)<thres).type(torch.FloatTensor)
result = torch.sum(tmp)/params.size()[0]
if (args.pr/100)<result:
print("threshold : {}".format(thres))
return thres
else:
thres += 0.0001
def getPruningMask(thres):
mask = torch.load('mask_null.dat')
mask[0] = torch.abs(net.conv1[0].weight.data)>thres
mask[1] = torch.abs(net.conv2[0].weight.data)>thres
mask[2] = torch.abs(net.conv3[0].weight.data)>thres
mask[3] = torch.abs(net.conv4[0].weight.data)>thres
mask[4] = torch.abs(net.conv5[0].weight.data)>thres
mask[5] = torch.abs(net.conv6[0].weight.data)>thres
mask[6] = torch.abs(net.conv7[0].weight.data)>thres
mask[7] = torch.abs(net.conv8[0].weight.data)>thres
mask[8] = torch.abs(net.conv9[0].weight.data)>thres
mask[9] = torch.abs(net.conv10[0].weight.data)>thres
mask[10] = torch.abs(net.conv11[0].weight.data)>thres
mask[11] = torch.abs(net.conv12[0].weight.data)>thres
mask[12] = torch.abs(net.conv13[0].weight.data)>thres
mask[13] = torch.abs(net.fc1[1].weight.data)>thres
mask[14] = torch.abs(net.fc2[1].weight.data)>thres
mask[15] = torch.abs(net.fc3[0].weight.data)>thres
mask[0] = mask[0].type(torch.FloatTensor)
mask[1] = mask[1].type(torch.FloatTensor)
mask[2] = mask[2].type(torch.FloatTensor)
mask[3] = mask[3].type(torch.FloatTensor)
mask[4] = mask[4].type(torch.FloatTensor)
mask[5] = mask[5].type(torch.FloatTensor)
mask[6] = mask[6].type(torch.FloatTensor)
mask[7] = mask[7].type(torch.FloatTensor)
mask[8] = mask[8].type(torch.FloatTensor)
mask[9] = mask[9].type(torch.FloatTensor)
mask[10] = mask[10].type(torch.FloatTensor)
mask[11] = mask[11].type(torch.FloatTensor)
mask[12] = mask[12].type(torch.FloatTensor)
mask[13] = mask[13].type(torch.FloatTensor)
mask[14] = mask[14].type(torch.FloatTensor)
mask[15] = mask[15].type(torch.FloatTensor)
return mask
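# pruneNetwork: zero out both the weights and their gradients wherever the mask
# is 0, so pruned connections stay pruned across optimizer steps.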
def pruneNetwork(mask):
for child in net.children():
for param in child.conv1[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[0].cuda())
param.data = torch.mul(param.data,mask[0].cuda())
for child in net.children():
for param in child.conv2[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[1].cuda())
param.data = torch.mul(param.data,mask[1].cuda())
for child in net.children():
for param in child.conv3[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[2].cuda())
param.data = torch.mul(param.data,mask[2].cuda())
for child in net.children():
for param in child.conv4[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[3].cuda())
param.data = torch.mul(param.data,mask[3].cuda())
for child in net.children():
for param in child.conv5[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[4].cuda())
param.data = torch.mul(param.data,mask[4].cuda())
for child in net.children():
for param in child.conv6[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[5].cuda())
param.data = torch.mul(param.data,mask[5].cuda())
for child in net.children():
for param in child.conv7[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[6].cuda())
param.data = torch.mul(param.data,mask[6].cuda())
for child in net.children():
for param in child.conv8[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[7].cuda())
param.data = torch.mul(param.data,mask[7].cuda())
for child in net.children():
for param in child.conv9[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[8].cuda())
param.data = torch.mul(param.data,mask[8].cuda())
for child in net.children():
for param in child.conv10[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[9].cuda())
param.data = torch.mul(param.data,mask[9].cuda())
for child in net.children():
for param in child.conv11[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[10].cuda())
param.data = torch.mul(param.data,mask[10].cuda())
for child in net.children():
for param in child.conv12[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[11].cuda())
param.data = torch.mul(param.data,mask[11].cuda())
for child in net.children():
for param in child.conv13[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[12].cuda())
param.data = torch.mul(param.data,mask[12].cuda())
for child in net.children():
for param in child.fc1[1].parameters():
param.grad.data = torch.mul(param.grad.data,mask[13].cuda())
param.data = torch.mul(param.data,mask[13].cuda())
for child in net.children():
for param in child.fc2[1].parameters():
param.grad.data = torch.mul(param.grad.data,mask[14].cuda())
param.data = torch.mul(param.data,mask[14].cuda())
for child in net.children():
for param in child.fc3[0].parameters():
param.grad.data = torch.mul(param.grad.data,mask[15].cuda())
param.data = torch.mul(param.data,mask[15].cuda())
return
def train(epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
net.train()
end = time.time()
for batch_idx, (inputs, targets) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
        if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
# compute output
outputs = net(inputs)
'''
loss = criterion(outputs, targets)
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1[0], inputs.size(0))
top5.update(prec5[0], inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
#progress_bar(batch_idx, len(train_loader), 'Loss: {loss.val:.4f} | Acc: %.3f%% (%d/%d)'
# % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
progress_bar(batch_idx, len(train_loader),
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
'''
progress_bar(batch_idx, len(train_loader))
def test():
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(val_loader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
        test_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
progress_bar(batch_idx, len(val_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*float(correct)/float(total), correct, total))
# Save checkpoint.
acc = 100.*correct/total
if args.mode == 0:
pass
else:
if acc > best_acc:
print('Saving..')
state = {
'net': net.module if use_cuda else net,
'acc': acc,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
#torch.save(state, './checkpoint/ckpt_20180726.t0')
best_acc = acc
def retrain(epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
net.train()
end = time.time()
for batch_idx, (inputs, targets) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
        if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
# compute output
outputs = net(inputs)
loss = criterion(outputs, targets)
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1[0], inputs.size(0))
top5.update(prec5[0], inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
#quantize()
#pruneNetwork(mask)
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
#progress_bar(batch_idx, len(train_loader), 'Loss: {loss.val:.4f} | Acc: %.3f%% (%d/%d)'
# % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
progress_bar(batch_idx, len(train_loader),
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def roundmax(input):
'''
maximum = 2 ** args.iwidth - 1
minimum = -maximum - 1
input = F.relu(torch.add(input, -minimum))
input = F.relu(torch.add(torch.neg(input), maximum - minimum))
input = torch.add(torch.neg(input), maximum)
'''
return input
def quant(input):
#input = torch.round(input / (2 ** (-args.aprec))) * (2 ** (-args.aprec))
return input
mode = args.mode
if mode == 0: # only inference
test()
elif mode == 1: # mode=1 is training & inference @ each epoch
for epoch in range(start_epoch, start_epoch+num_epoch):
train(epoch)
exit()
else:
pass
| [
"[email protected]"
]
| |
27a49544c7c1b8f8f550a76bdb9f95675a635c6a | 3cedb583e9f3dfcdf16aeba56a0b3ff7c6213e99 | /python-codes/m3_curso_em_video_estruturas_compostas/ex101.0.py | b156f239ea79ed513ea7696f940a01732d28e535 | [
"MIT"
]
| permissive | lucasportella/learning-python | 0f39ae2389db6d07b5b8c14ebe0c24f1e93c77c5 | a9449dffd489e7e1f1619e3acef86bc2c64f0f14 | refs/heads/master | 2022-12-26T15:04:12.806300 | 2020-10-14T23:17:47 | 2020-10-14T23:17:47 | 260,685,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | def voto(ano_nascimento):
from datetime import date
idade = date.today().year - ano_nascimento
if idade < 16:
return print(f"Com {idade} anos: VOTO NEGADO")
elif 16 <= idade < 18 or idade >= 70:
return print(f"Com {idade} anos: VOTO OPCIONAL")
elif idade >= 18 and idade < 70:
return print(f"Com {idade} anos: VOTO OBRIGATÓRIO")
print('--'*10)
voto(int(input("Em que ano você nasceu? ")))
| [
"[email protected]"
]
| |
bdaf49b8f1852494947d57dd9f3e385d7cb21ecb | 73c9211d5627594e0191510f0b4d70a907f5c4c5 | /pytest/lesson6/TestXlsxReportdemo.py | 2c2e3aef8262fceb1736ac41921a38a074af96c5 | []
| no_license | tigerxjtu/py3 | 35378f270363532fb30962da8674dbcee99eb5ff | 5d24cd074f51bd0f17f6cc4f5f1a6e7cf0d48779 | refs/heads/master | 2021-07-13T05:34:15.080119 | 2020-06-24T09:36:33 | 2020-06-24T09:36:33 | 159,121,100 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,744 | py | # -*- coding:utf-8 -*-
import xlsxwriter
import time
from pytest.lesson4.testrequest import *
from pytest.lesson4.testvote import *
from pytest.lesson4.testrequest import *
from pytest.testdata.getpath import GetTestDataPath
import xlrd
# Write the GetTestReport method yourself (it returns the report file path)
from pytest.testdata.getpath import GetTestReport
testurl="http://127.0.0.1:8000"
ReportPath=GetTestReport()
workbook = xlsxwriter.Workbook(ReportPath)
worksheet = workbook.add_worksheet("测试总结")
worksheet2 = workbook.add_worksheet("用例详情")
test_polls()
test_vote()
test_login()
TestReport = hlist  # collected test results
hpassnum = 0  # counter for the number of test cases that passed
def get_format(wd, option={}):
return wd.add_format(option)
# centered, bordered cell format
def get_format_center(wd, num=1):
return wd.add_format({'align': 'center', 'valign': 'vcenter', 'border': num})
def set_border_(wd, num=1):
return wd.add_format({}).set_border(num)
# write data into a cell using the centered format
def _write_center(worksheet, cl, data, wd):
return worksheet.write(cl, data, get_format_center(wd))
# build the pie chart
def pie(workbook, worksheet):
chart1 = workbook.add_chart({'type': 'pie'})
chart1.add_series({
'name': '接口测试统计',
'categories': '=测试总结!$D$4:$D$5',
'values': '=测试总结!$E$4:$E$5',
})
chart1.set_title({'name': '接口测试统计'})
chart1.set_style(10)
worksheet.insert_chart('A9', chart1, {'x_offset': 25, 'y_offset': 10})
def init(worksheet):
global workbook
    # set the column widths and row heights
worksheet.set_column("A:A", 15)
worksheet.set_column("B:B", 20)
worksheet.set_column("C:C", 20)
worksheet.set_column("D:D", 20)
worksheet.set_column("E:E", 20)
worksheet.set_column("F:F", 20)
worksheet.set_row(1, 30)
worksheet.set_row(2, 30)
worksheet.set_row(3, 30)
worksheet.set_row(4, 30)
worksheet.set_row(5, 30)
# worksheet.set_row(0, 200)
define_format_H1 = get_format(workbook, {'bold': True, 'font_size': 18})
define_format_H2 = get_format(workbook, {'bold': True, 'font_size': 14})
define_format_H1.set_border(1)
define_format_H2.set_border(1)
define_format_H1.set_align("center")
define_format_H2.set_align("center")
define_format_H2.set_bg_color("blue")
define_format_H2.set_color("#ffffff")
# Create a new Chart object.
worksheet.merge_range('A1:F1', '接口自动化测试报告', define_format_H1)
worksheet.merge_range('A2:F2', '测试概括', define_format_H2)
worksheet.merge_range('A3:A6', '炼数成金', get_format_center(workbook))
# worksheet.insert_image('A1', GetLogoDataPath())
_write_center(worksheet, "B3", '项目名称', workbook)
_write_center(worksheet, "B4", '接口版本', workbook)
_write_center(worksheet, "B5", '脚本语言', workbook)
_write_center(worksheet, "B6", '测试地址', workbook)
data = {"test_name": "炼数成金项目接口", "test_version": "v1.0.0",
"test_pl": "Python3", "test_net": testurl}
_write_center(worksheet, "C3", data['test_name'], workbook)
_write_center(worksheet, "C4", data['test_version'], workbook)
_write_center(worksheet, "C5", data['test_pl'], workbook)
_write_center(worksheet, "C6", data['test_net'], workbook)
_write_center(worksheet, "D3", "测试用例总数", workbook)
_write_center(worksheet, "D4", "测试用例通过数", workbook)
_write_center(worksheet, "D5", "测试用例失败数", workbook)
_write_center(worksheet, "D6", "测试日期", workbook)
timenow = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
data1 = {"test_sum": len(TestReport),
"test_success": hpassnum,
"test_failed": len(TestReport) - hpassnum,
"test_date": timenow}
_write_center(worksheet, "E3", data1['test_sum'], workbook)
_write_center(worksheet, "E4", data1['test_success'], workbook)
_write_center(worksheet, "E5", data1['test_failed'], workbook)
_write_center(worksheet, "E6", data1['test_date'], workbook)
_write_center(worksheet, "F3", "测试用例通过率", workbook)
worksheet.merge_range('F4:F6', str(
(round(hpassnum / len(TestReport), 2)) * 100) + '%', get_format_center(workbook))
pie(workbook, worksheet)
def test_detail(worksheet):
    # set the column widths
worksheet.set_column("A:A", 30)
worksheet.set_column("B:B", 20)
worksheet.set_column("C:C", 20)
worksheet.set_column("D:D", 20)
worksheet.set_column("E:E", 20)
worksheet.set_column("F:F", 20)
worksheet.set_column("G:G", 20)
worksheet.set_column("H:H", 20)
    # set the row heights
for hrow in range(len(TestReport) + 2):
worksheet.set_row(hrow, 30)
worksheet.merge_range('A1:H1', '测试详情', get_format(workbook, {'bold': True,
'font_size': 18,
'align': 'center',
'valign': 'vcenter',
'bg_color': 'blue',
'font_color': '#ffffff'}))
_write_center(worksheet, "A2", '用例ID', workbook)
_write_center(worksheet, "B2", '接口名称', workbook)
_write_center(worksheet, "C2", '接口协议', workbook)
_write_center(worksheet, "D2", 'URL', workbook)
_write_center(worksheet, "E2", '参数', workbook)
_write_center(worksheet, "F2", '预期值', workbook)
_write_center(worksheet, "G2", '实际值', workbook)
_write_center(worksheet, "H2", '测试结果', workbook)
    data = {"info": TestReport}  # test results that get written into the report
temp = len(TestReport) + 2
global hpassnum
for item in data["info"]:
if item["t_result"] == "通过":
hpassnum += 1
else:
pass
_write_center(worksheet, "A" + str(temp), item["t_id"], workbook)
_write_center(worksheet, "B" + str(temp), item["t_name"], workbook)
_write_center(worksheet, "C" + str(temp), item["t_method"], workbook)
_write_center(worksheet, "D" + str(temp), item["t_url"], workbook)
_write_center(worksheet, "E" + str(temp), item["t_param"], workbook)
_write_center(worksheet, "F" + str(temp), item["t_hope"], workbook)
_write_center(worksheet, "G" + str(temp), item["t_actual"], workbook)
_write_center(worksheet, "H" + str(temp), item["t_result"], workbook)
temp = temp - 1
test_detail(worksheet2)
init(worksheet)
workbook.close()
| [
"[email protected]"
]
| |
15cb6d7afdc7fc7eaaeaf492f771909ea8cda285 | 833b43575815ce6c5fa8cbac2628cb774331eda7 | /chap14_p277_code1.py | ae943fb048c09744b8a7feb977edb8216aa7d722 | []
| no_license | ai-times/infinitybook_python | d9529dfe7d486bf5c713d52b530915a23cbf1812 | 1c011c31994d07fe959bba9b519c4365f5f40e7f | refs/heads/main | 2023-03-01T12:18:20.695888 | 2021-02-14T04:22:40 | 2021-02-14T04:22:40 | 338,578,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | code = input("주민번호 앞자리 입력 : ")
y = "19" + code[0:2]
m = code[2:4]
d = code[4:6]
age = 2019-int(y)+1
print("당신은", y, "년에 태어났군요.")
print("당신의 생일은", m, "월", d, "일 이군요.")
print("당신의 올해", age, "살 이군요")
| [
"[email protected]"
]
| |
2918cf510c1ddfe401a31a9ef624cd06e9c23e25 | edb6545500e39df9c67aa918a6125bffc8ec1aee | /src/prompt_toolkit/layout/screen.py | 2b58272ca1cc52d588539ac551aa16498dd12b20 | [
"BSD-3-Clause"
]
| permissive | repnzscasb/python-prompt-toolkit | 2681716b0e10ef816228091a19700d805ec0f4d9 | da05f669d00817655f76b82972272d4d5f4d4225 | refs/heads/master | 2022-12-22T09:34:26.426466 | 2022-12-06T22:35:54 | 2022-12-06T22:35:54 | 148,856,050 | 0 | 0 | BSD-3-Clause | 2018-09-15T00:53:56 | 2018-09-15T00:53:56 | null | UTF-8 | Python | false | false | 10,241 | py | from collections import defaultdict
from typing import TYPE_CHECKING, Callable, DefaultDict, Dict, List, Optional, Tuple
from prompt_toolkit.cache import FastDictCache
from prompt_toolkit.data_structures import Point
from prompt_toolkit.utils import get_cwidth
if TYPE_CHECKING:
from .containers import Window
__all__ = [
"Screen",
"Char",
]
class Char:
"""
Represent a single character in a :class:`.Screen`.
This should be considered immutable.
:param char: A single character (can be a double-width character).
:param style: A style string. (Can contain classnames.)
"""
__slots__ = ("char", "style", "width")
# If we end up having one of these special control sequences in the input string,
# we should display them as follows:
# Usually this happens after a "quoted insert".
display_mappings: Dict[str, str] = {
"\x00": "^@", # Control space
"\x01": "^A",
"\x02": "^B",
"\x03": "^C",
"\x04": "^D",
"\x05": "^E",
"\x06": "^F",
"\x07": "^G",
"\x08": "^H",
"\x09": "^I",
"\x0a": "^J",
"\x0b": "^K",
"\x0c": "^L",
"\x0d": "^M",
"\x0e": "^N",
"\x0f": "^O",
"\x10": "^P",
"\x11": "^Q",
"\x12": "^R",
"\x13": "^S",
"\x14": "^T",
"\x15": "^U",
"\x16": "^V",
"\x17": "^W",
"\x18": "^X",
"\x19": "^Y",
"\x1a": "^Z",
"\x1b": "^[", # Escape
"\x1c": "^\\",
"\x1d": "^]",
"\x1e": "^^",
"\x1f": "^_",
"\x7f": "^?", # ASCII Delete (backspace).
# Special characters. All visualized like Vim does.
"\x80": "<80>",
"\x81": "<81>",
"\x82": "<82>",
"\x83": "<83>",
"\x84": "<84>",
"\x85": "<85>",
"\x86": "<86>",
"\x87": "<87>",
"\x88": "<88>",
"\x89": "<89>",
"\x8a": "<8a>",
"\x8b": "<8b>",
"\x8c": "<8c>",
"\x8d": "<8d>",
"\x8e": "<8e>",
"\x8f": "<8f>",
"\x90": "<90>",
"\x91": "<91>",
"\x92": "<92>",
"\x93": "<93>",
"\x94": "<94>",
"\x95": "<95>",
"\x96": "<96>",
"\x97": "<97>",
"\x98": "<98>",
"\x99": "<99>",
"\x9a": "<9a>",
"\x9b": "<9b>",
"\x9c": "<9c>",
"\x9d": "<9d>",
"\x9e": "<9e>",
"\x9f": "<9f>",
# For the non-breaking space: visualize like Emacs does by default.
# (Print a space, but attach the 'nbsp' class that applies the
# underline style.)
"\xa0": " ",
}
def __init__(self, char: str = " ", style: str = "") -> None:
# If this character has to be displayed otherwise, take that one.
if char in self.display_mappings:
if char == "\xa0":
style += " class:nbsp " # Will be underlined.
else:
style += " class:control-character "
char = self.display_mappings[char]
self.char = char
self.style = style
# Calculate width. (We always need this, so better to store it directly
# as a member for performance.)
self.width = get_cwidth(char)
# In theory, `other` can be any type of object, but because of performance
# we don't want to do an `isinstance` check every time. We assume "other"
# is always a "Char".
def _equal(self, other: "Char") -> bool:
return self.char == other.char and self.style == other.style
def _not_equal(self, other: "Char") -> bool:
# Not equal: We don't do `not char.__eq__` here, because of the
# performance of calling yet another function.
return self.char != other.char or self.style != other.style
if not TYPE_CHECKING:
__eq__ = _equal
__ne__ = _not_equal
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.char!r}, {self.style!r})"
_CHAR_CACHE: FastDictCache[Tuple[str, str], Char] = FastDictCache(
Char, size=1000 * 1000
)
Transparent = "[transparent]"
class Screen:
"""
Two dimensional buffer of :class:`.Char` instances.
"""
def __init__(
self,
default_char: Optional[Char] = None,
initial_width: int = 0,
initial_height: int = 0,
) -> None:
if default_char is None:
default_char2 = _CHAR_CACHE[" ", Transparent]
else:
default_char2 = default_char
self.data_buffer: DefaultDict[int, DefaultDict[int, Char]] = defaultdict(
lambda: defaultdict(lambda: default_char2)
)
#: Escape sequences to be injected.
self.zero_width_escapes: DefaultDict[int, DefaultDict[int, str]] = defaultdict(
lambda: defaultdict(lambda: "")
)
#: Position of the cursor.
self.cursor_positions: Dict[
"Window", Point
] = {} # Map `Window` objects to `Point` objects.
#: Visibility of the cursor.
self.show_cursor = True
#: (Optional) Where to position the menu. E.g. at the start of a completion.
#: (We can't use the cursor position, because we don't want the
#: completion menu to change its position when we browse through all the
#: completions.)
self.menu_positions: Dict[
"Window", Point
] = {} # Map `Window` objects to `Point` objects.
#: Currently used width/height of the screen. This will increase when
#: data is written to the screen.
self.width = initial_width or 0
self.height = initial_height or 0
# Windows that have been drawn. (Each `Window` class will add itself to
# this list.)
self.visible_windows_to_write_positions: Dict["Window", "WritePosition"] = {}
# List of (z_index, draw_func)
self._draw_float_functions: List[Tuple[int, Callable[[], None]]] = []
@property
def visible_windows(self) -> List["Window"]:
return list(self.visible_windows_to_write_positions.keys())
def set_cursor_position(self, window: "Window", position: Point) -> None:
"""
Set the cursor position for a given window.
"""
self.cursor_positions[window] = position
def set_menu_position(self, window: "Window", position: Point) -> None:
"""
Set the cursor position for a given window.
"""
self.menu_positions[window] = position
def get_cursor_position(self, window: "Window") -> Point:
"""
Get the cursor position for a given window.
Returns a `Point`.
"""
try:
return self.cursor_positions[window]
except KeyError:
return Point(x=0, y=0)
def get_menu_position(self, window: "Window") -> Point:
"""
Get the menu position for a given window.
(This falls back to the cursor position if no menu position was set.)
"""
try:
return self.menu_positions[window]
except KeyError:
try:
return self.cursor_positions[window]
except KeyError:
return Point(x=0, y=0)
def draw_with_z_index(self, z_index: int, draw_func: Callable[[], None]) -> None:
"""
Add a draw-function for a `Window` which has a >= 0 z_index.
This will be postponed until `draw_all_floats` is called.
"""
self._draw_float_functions.append((z_index, draw_func))
def draw_all_floats(self) -> None:
"""
Draw all float functions in order of z-index.
"""
# We keep looping because some draw functions could add new functions
# to this list. See `FloatContainer`.
while self._draw_float_functions:
# Sort the floats that we have so far by z_index.
functions = sorted(self._draw_float_functions, key=lambda item: item[0])
# Draw only one at a time, then sort everything again. Now floats
# might have been added.
self._draw_float_functions = functions[1:]
functions[0][1]()
def append_style_to_content(self, style_str: str) -> None:
"""
For all the characters in the screen.
Set the style string to the given `style_str`.
"""
b = self.data_buffer
char_cache = _CHAR_CACHE
append_style = " " + style_str
for y, row in b.items():
for x, char in row.items():
row[x] = char_cache[char.char, char.style + append_style]
def fill_area(
self, write_position: "WritePosition", style: str = "", after: bool = False
) -> None:
"""
Fill the content of this area, using the given `style`.
The style is prepended before whatever was here before.
"""
if not style.strip():
return
xmin = write_position.xpos
xmax = write_position.xpos + write_position.width
char_cache = _CHAR_CACHE
data_buffer = self.data_buffer
if after:
append_style = " " + style
prepend_style = ""
else:
append_style = ""
prepend_style = style + " "
for y in range(
write_position.ypos, write_position.ypos + write_position.height
):
row = data_buffer[y]
for x in range(xmin, xmax):
cell = row[x]
row[x] = char_cache[
cell.char, prepend_style + cell.style + append_style
]
class WritePosition:
def __init__(self, xpos: int, ypos: int, width: int, height: int) -> None:
assert height >= 0
assert width >= 0
# xpos and ypos can be negative. (A float can be partially visible.)
self.xpos = xpos
self.ypos = ypos
self.width = width
self.height = height
def __repr__(self) -> str:
return "{}(x={!r}, y={!r}, width={!r}, height={!r})".format(
self.__class__.__name__,
self.xpos,
self.ypos,
self.width,
self.height,
)
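
# Minimal usage sketch (not part of prompt_toolkit itself): the data buffer is a
# defaultdict of defaultdicts, so reading any (row, column) lazily yields the
# default Char, and writing simply replaces that cached cell.
if __name__ == "__main__":
    screen = Screen(initial_width=80, initial_height=24)
    screen.data_buffer[0][0] = _CHAR_CACHE["x", "bold"]
    print(screen.data_buffer[0][0])   # Char('x', 'bold')
    print(screen.data_buffer[5][10])  # untouched cell: Char(' ', '[transparent]')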
| [
"[email protected]"
]
| |
af9738f6a4a38219406718a295ea78a732a3232d | a5205843ab0c6cff8f76f32436c580cfd523e9ad | /edit_sample_craps.py | cb01ef33a0829d35b2b1f5ee2d59d478e474790b | []
| no_license | LRBeaver/Random | 70194cde5d26b5e268d7c245056cedc8d0a6618d | 90ec0036a4efb383d6496a7724a108aa1b2f2ddf | refs/heads/master | 2020-12-24T18:42:37.716951 | 2016-04-14T12:52:56 | 2016-04-14T12:52:56 | 56,150,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | __author__ = 'lyndsay.beaver'
import random
def playRound():
print("The come-out phase: ")
print()
rollDice = input("Hit ENTER to roll the dice...")
diceTotal = random.randint(1,6) + random.randint(1,6)
if diceTotal in (7,11):
print("You rolled a", diceTotal)
print("You Win: Natural!")
elif diceTotal in (2,3,12):
print("You rolled a", diceTotal)
print("You Lose: Crap-Out!")
else:
print("You rolled a", diceTotal)
pointPhase(diceTotal)
def pointPhase(diceTotal):
    print("The Point Phase:")
    rollDice = input("Hit ENTER to roll the dice...")
    while True:
        diceTotalPoint = random.randint(1,6) + random.randint(1,6)
        print("You Rolled a", diceTotalPoint)
        if diceTotalPoint == diceTotal:
            print("You Win: Hit!")
            break
        elif diceTotalPoint == 7:
            print("You lose: Seven-Out!")
            break
        else:
            print("Keep Rolling")
def main():
playRound()
main() | [
"[email protected]"
]
| |
221f4c8150fddc906199d788e70ea2553500a8f7 | 2903ac66369b6bd45889b12629d8c8e34e6089b3 | /frappe_training/config/desktop.py | 60ea98f53064fec38a864b70c7e641453fb4dd78 | [
"MIT"
]
| permissive | sivaranjanipalanivel/training | 6fa50b5f97fb00894404fba11122599fd796623c | b177c56a319c07dc3467ce3113e332ecee9b81fa | refs/heads/master | 2023-07-17T06:11:29.894363 | 2021-08-02T14:47:31 | 2021-08-02T14:47:31 | 391,987,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "frappe_training",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("frappe_training")
}
]
| [
"[email protected]"
]
| |
a69a5ede8bc3f3237d149df470385eda0dce6cb6 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /YLf984Eod74ha4Tok_9.py | 8d3ff278a5843fa0485c8620003772aaf0edbc8e | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | """
A solar year lasts roughly 365.25 days, but calendars count only whole days.
To absorb the accumulated 0.25 days, one year in every four-year cycle is
extended to 366 days (adding February 29 as an intercalary day) and is called
a **leap year**; the other years of the cycle keep 365 days and are **not
leap years**.
In this challenge (though quite repetitive), we take it to a new level: you
are to determine whether a year is a leap year without using the **datetime**
class, **if blocks**, **if-elif blocks**, **conditionals** (`a if b else c`),
or the logical operators **AND** (`and`) and **OR** (`or`), with the exemption
of the **NOT** (`not`) operator, which is allowed.
Return `True` if it's a leap year, `False` otherwise.
### Examples
leap_year(1979) ➞ False
leap_year(2000) ➞ True
leap_year(2016) ➞ True
leap_year(1521) ➞ False
leap_year(1996) ➞ True
leap_year(1800) ➞ False
### Notes
You can't use the **datetime** class, **if statements** in general, the
**conditional** nor the **logical operators** (`and`, `or`).
"""
def leap_year(yr):
return yr%400 == 0 if not yr%100 else yr%4 == 0
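
# Note: the challenge text above also bans the conditional expression used in
# leap_year. A minimal alternative sketch (illustrative, not from the source)
# that avoids `if`, `and`, `or`, and conditionals by indexing a pair of booleans:
def leap_year_no_conditional(yr):
    return [yr % 4 == 0, yr % 400 == 0][yr % 100 == 0]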
| [
"[email protected]"
]
| |
85863f93c57442e96186df3112f03e59a994bebf | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/InnerDetector/InDetExample/InDetSLHC_Example/share/jobOptions_SLHC_nn_prodTrainingSample.py | f8455debc388b3c7208aa0f0ff0ccf73d99c6714 | []
| no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,056 | py | ###############################################################################
# jobOptions_SLHC_nn_prodTrainingSample.py
#
# script that reads a series of simulated HIT files, runs digitization and
# clusterization and produces the Ntuples needed to train the cluster splitting
# neuronal network.
# The ntuples produced are stored in TrkValidation.root
# -Validation
# |-> PixelRIOs : Cluster info.
# |-> NNinput : Input to train the NN.
#
# Note: This jobOptions WILL NOT WORK as it is neither for SLHC nor for IBL.
# YOU NEED TO EDIT PixelClusterValidationNtupleWriter.cxx
# IN InnerDetector/InDetValidation/InDetTrackValidation/InDetTrackValidation/
# TO USE ToT INSTEAD OF CHARGE IN NNinput
#
# Note 2: This jobOptions are based on InDetSLHCExample options. There there
# is also a stand alone .py file in this dir.
#
# Author: Tiago Perez <[email protected]>
# Date: 9-Jan-2012
##############################################################################
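# (Illustrative note, not part of the original options.) The resulting ntuples
# can be inspected offline, e.g. with uproot, assuming it is available:
#   import uproot
#   f = uproot.open("TrkValidation.root")
#   clusters = f["Validation/PixelRIOs"]
#   nn_input = f["Validation/NNinput"]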
#--------------------------------------------------------------
# Template jobOptions: SLHC
# - Digitization
#--------------------------------------------------------------
from AthenaCommon.GlobalFlags import globalflags
globalflags.ConditionsTag = "OFLCOND-SDR-BS14T-ATLAS-00"
include("InDetSLHC_Example/preInclude.SLHC.py")
include("InDetSLHC_Example/preInclude.SiliconOnly.py")
from AthenaCommon.AthenaCommonFlags import jobproperties
jobproperties.AthenaCommonFlags.EvtMax=-1
#
## Input data
DATADIR="root://eosatlas.cern.ch//eos/atlas/user/t/tperez/"
#
## MinBias
#FILEPATH+="mc11_slhcid.108119.Pythia8_minbias_Inelastic_high.merge.HITS.e876_s1333_s1335_tid514272_00/"
#FILEPATH+="HITS.514272._000030.pool.root.1"
#
## ttbar
FILEPATH=DATADIR+"mc11_slhcid.105568.ttbar_Pythia.simul.HITS.e842_s1333_tid510282_00/"
FILEPATH+="HITS.510282._000429.pool.root.1"
#
#
jobproperties.AthenaCommonFlags.PoolHitsInput=[FILEPATH]
jobproperties.AthenaCommonFlags.PoolRDOOutput=DATADIR+"ttbar.digit.RDO.pool.root"
from AthenaCommon.GlobalFlags import jobproperties
jobproperties.Global.DetDescrVersion='ATLAS-SLHC-01-00-00'
from Digitization.DigitizationFlags import jobproperties
jobproperties.Digitization.doInDetNoise=False
include ( "Digitization/Digitization.py" )
include("InDetSLHC_Example/postInclude.SLHC_Digitization.py")
#
# Start clusterization
#
#
# Suppress usage of pixel distortions when validating simulation
# (otherwise clusters are corrected for module bow while G4 is not)
#
from IOVDbSvc.CondDB import conddb
if not conddb.folderRequested('/Indet/PixelDist'):
conddb.addFolder('PIXEL_OFL','/Indet/PixelDist')
conddb.addOverride("/Indet/PixelDist","InDetPixelDist-nominal")
#
# Include clusterization
# (need to set up services not already configured for digitization)
#
#include ("PixelConditionsServices/PixelRecoDb_jobOptions.py")
#
## Disable some COOL queries ?
from PixelConditionsTools.PixelConditionsToolsConf import PixelRecoDbTool
ToolSvc += PixelRecoDbTool()
ToolSvc.PixelRecoDbTool.InputSource = 0
## Configure the clusterization tool
from SiClusterizationTool.SiClusterizationToolConf import InDet__ClusterMakerTool
ClusterMakerTool = InDet__ClusterMakerTool( name = "InDet::ClusterMakerTool",
UsePixelCalibCondDB = False )
ToolSvc += ClusterMakerTool
## Configure PixelConditionsSummarySvc
from PixelConditionsServices.PixelConditionsServicesConf import PixelConditionsSummarySvc
InDetPixelConditionsSummarySvc = PixelConditionsSummarySvc()
InDetPixelConditionsSummarySvc.UseSpecialPixelMap = False
InDetPixelConditionsSummarySvc.UseDCS = False
InDetPixelConditionsSummarySvc.UseByteStream = False
ServiceMgr += InDetPixelConditionsSummarySvc
print InDetPixelConditionsSummarySvc
from InDetPrepRawDataFormation.InDetPrepRawDataFormationConf import InDet__PixelClusterization
job += InDet__PixelClusterization("PixelClusterization")
#
# Include PixelValidationNtuple
# with some information about Geant4 hits
#
from InDetTrackValidation.InDetTrackValidationConf import InDet__PixelClusterValidationNtupleWriter
job += InDet__PixelClusterValidationNtupleWriter("PixelNtupleWriter",
NtupleFileName = 'TRKVAL',
NtupleDirectoryName = 'Validation',
NtupleTreeName = 'PixelRIOs',
PixelClusterContainer = 'PixelClusters',
WriteDetailedPixelInformation = False,
DoHits = True,
DoMC = True,
FindNotAssociatedParticle= False,
WriteNNTraining = True,
# Extra flags ONLY ON PRIVATE InDetTrackValidation/PixelClusterValidationNtupleWriter
UseToT = True,
DetGeo = 'SLHC')
print job.PixelNtupleWriter
theApp.HistogramPersistency = 'ROOT'
if not 'OutputNTpl' in dir():
OutputNTpl = "TrkValidation_noTrack_ttbar_.root"
# Root file definition
if not hasattr(ServiceMgr, 'THistSvc'):
from GaudiSvc.GaudiSvcConf import THistSvc
ServiceMgr += THistSvc()
ServiceMgr.THistSvc.Output += [ "TRKVAL DATAFILE='" + OutputNTpl + "' TYPE='ROOT' OPT='RECREATE'" ]
theApp.Dlls += [ 'RootHistCnv' ]
#
#
#
MessageSvc = Service( "MessageSvc" )
#increase the number of letter reserved to the alg/tool name from 18 to 30
MessageSvc.Format = "% F%50W%S%7W%R%T %0W%M"
# to change the default limit on number of message per alg
MessageSvc.defaultLimit = 9999999 # all messages
# Set output level threshold among DEBUG, INFO, WARNING, ERROR, FATAL
MessageSvc.OutputLevel = INFO
include("InDetSLHC_Example/postInclude.SLHC_Setup.py")
| [
"[email protected]"
]
| |
d49a088bb0cfd1df5be0927b59cd9782ace85d05 | d0e83b3f551c6af16aa0c8ed4ff074b3ec268120 | /processors/feat.py | e48cc144ee2ba12b7865cdbb61a44eb472849820 | []
| no_license | SamuelLAN/kaggle_SCTP | cfb0228a81d71b2f1c315352bd6435042066967f | 50ff2895baa6de29bdb19bfb20ca76718079d188 | refs/heads/master | 2020-04-25T16:22:07.803524 | 2019-04-03T09:06:12 | 2019-04-03T09:06:12 | 172,909,260 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | #!/usr/bin/Python
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
def lda(train_x, train_y, val_x, test_x):
''' LDA reduce the dimensions of the features '''
_lda = LDA()
train_x = _lda.fit_transform(train_x, train_y)
val_x = _lda.transform(val_x)
test_x = _lda.transform(test_x)
return train_x, val_x, test_x
def add_lda(train_x, train_y, val_x, test_x):
''' LDA reduce the dimensions of the features; and add this lda feature to the origin features '''
_lda = LDA()
train_lda = _lda.fit_transform(train_x, train_y)
val_lda = _lda.transform(val_x)
test_lda = _lda.transform(test_x)
train_x = np.hstack([train_x, train_lda])
val_x = np.hstack([val_x, val_lda])
test_x = np.hstack([test_x, test_lda])
return train_x, val_x, test_x
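
# Minimal usage sketch (not part of the original module; the random data below
# is purely illustrative):
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    train_x, train_y = rng.rand(100, 20), rng.randint(0, 2, 100)
    val_x, test_x = rng.rand(20, 20), rng.rand(30, 20)
    tr, va, te = add_lda(train_x, train_y, val_x, test_x)
    print(tr.shape, va.shape, te.shape)  # 20 original features plus 1 LDA column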
| [
"[email protected]"
]
| |
7e9dcb08a5d09de543ba08b0a18e43862bec4e80 | 8537ecfe2a23cfee7c9f86e2318501f745078d67 | /Practise_stuff/nympy_commands/oo_numpy_array_manipulation2.py | 2fd9ce51e253406e6f5724fd2fcd8efc7014909a | []
| no_license | oolsson/oo_eclipse | 91d33501d9ed6c6b3c51bb22b635eb75da88e4e1 | 1828866bc4e1f67b279c5a037e4a6a4439ddb090 | refs/heads/master | 2021-01-01T20:17:12.644890 | 2015-11-30T09:49:41 | 2015-11-30T09:49:41 | 23,485,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | '''
Created on Jan 22, 2012
@author: oo
'''
import numpy
np=numpy
A=[1,2,3]
B=[4,5,6]
A=np.array(A)
B=np.array(B)
c=np.concatenate((A,B))
print c
print '2------------'
c=np.column_stack((A,B))
print c
print '3------------'
c=np.hstack((A,B))
print c
c=np.vstack((A,B))
print c
print '4------------'
c=np.array_split(c,1)
print c
print '5-----------'
d=np.array([1])
d=np.tile(d,7)
print d
print '6-----------'
x = np.array([[1,2],[3,4]])
print np.repeat(x, 1)
print np.repeat(x, 3, axis=1)
print np.repeat(x, [1, 2], axis=0)
| [
"[email protected]"
]
| |
a00c26fde829171625876699fcb8f48c7456fb31 | b0f151047c8313fd18566b020dab374f0d696f96 | /academicstoday/tenant_foundation/migrations/0001_initial.py | 4697863b5fa9d9f2584ea2c1b7fe5bd71ebe30d6 | [
"BSD-3-Clause"
]
| permissive | abhijitdalavi/Django-paas | 4c65477f3865a344a789e4ff0666f792dfda13a6 | cf58cf216d377ea97a2676cd594f96fb9d602a46 | refs/heads/master | 2022-04-01T06:27:50.409132 | 2018-04-09T23:35:32 | 2018-04-09T23:35:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,970 | py | # Generated by Django 2.0.4 on 2018-04-08 23:08
from decimal import Decimal
from django.db import migrations, models
import djmoney.models.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(db_index=True, help_text='The title of this course.', max_length=63, verbose_name='Title')),
('sub_title', models.CharField(blank=True, help_text='The sub-title of this course.', max_length=127, null=True, verbose_name='Sub-Title')),
('category_text', models.CharField(db_index=True, help_text='The category text of this course.', max_length=127, verbose_name='Category Text')),
('description', models.TextField(blank=True, help_text='The course description.', null=True, verbose_name='Description')),
('status', models.PositiveSmallIntegerField(blank=True, default=0, verbose_name='Description')),
('purchase_fee_currency', djmoney.models.fields.CurrencyField(choices=[('XUA', 'ADB Unit of Account'), ('AFN', 'Afghani'), ('DZD', 'Algerian Dinar'), ('ARS', 'Argentine Peso'), ('AMD', 'Armenian Dram'), ('AWG', 'Aruban Guilder'), ('AUD', 'Australian Dollar'), ('AZN', 'Azerbaijanian Manat'), ('BSD', 'Bahamian Dollar'), ('BHD', 'Bahraini Dinar'), ('THB', 'Baht'), ('PAB', 'Balboa'), ('BBD', 'Barbados Dollar'), ('BYN', 'Belarussian Ruble'), ('BYR', 'Belarussian Ruble'), ('BZD', 'Belize Dollar'), ('BMD', 'Bermudian Dollar (customarily known as Bermuda Dollar)'), ('BTN', 'Bhutanese ngultrum'), ('VEF', 'Bolivar Fuerte'), ('BOB', 'Boliviano'), ('XBA', 'Bond Markets Units European Composite Unit (EURCO)'), ('BRL', 'Brazilian Real'), ('BND', 'Brunei Dollar'), ('BGN', 'Bulgarian Lev'), ('BIF', 'Burundi Franc'), ('XOF', 'CFA Franc BCEAO'), ('XAF', 'CFA franc BEAC'), ('XPF', 'CFP Franc'), ('CAD', 'Canadian Dollar'), ('CVE', 'Cape Verde Escudo'), ('KYD', 'Cayman Islands Dollar'), ('CLP', 'Chilean peso'), ('XTS', 'Codes specifically reserved for testing purposes'), ('COP', 'Colombian peso'), ('KMF', 'Comoro Franc'), ('CDF', 'Congolese franc'), ('BAM', 'Convertible Marks'), ('NIO', 'Cordoba Oro'), ('CRC', 'Costa Rican Colon'), ('HRK', 'Croatian Kuna'), ('CUP', 'Cuban Peso'), ('CUC', 'Cuban convertible peso'), ('CZK', 'Czech Koruna'), ('GMD', 'Dalasi'), ('DKK', 'Danish Krone'), ('MKD', 'Denar'), ('DJF', 'Djibouti Franc'), ('STD', 'Dobra'), ('DOP', 'Dominican Peso'), ('VND', 'Dong'), ('XCD', 'East Caribbean Dollar'), ('EGP', 'Egyptian Pound'), ('SVC', 'El Salvador Colon'), ('ETB', 'Ethiopian Birr'), ('EUR', 'Euro'), ('XBB', 'European Monetary Unit (E.M.U.-6)'), ('XBD', 'European Unit of Account 17(E.U.A.-17)'), ('XBC', 'European Unit of Account 9(E.U.A.-9)'), ('FKP', 'Falkland Islands Pound'), ('FJD', 'Fiji Dollar'), ('HUF', 'Forint'), ('GHS', 'Ghana Cedi'), ('GIP', 'Gibraltar Pound'), ('XAU', 'Gold'), ('XFO', 'Gold-Franc'), ('PYG', 'Guarani'), ('GNF', 'Guinea Franc'), ('GYD', 'Guyana Dollar'), ('HTG', 'Haitian gourde'), ('HKD', 'Hong Kong Dollar'), ('UAH', 'Hryvnia'), ('ISK', 'Iceland Krona'), ('INR', 'Indian Rupee'), ('IRR', 'Iranian Rial'), ('IQD', 'Iraqi Dinar'), ('IMP', 'Isle of Man Pound'), ('JMD', 'Jamaican Dollar'), ('JOD', 'Jordanian Dinar'), ('KES', 'Kenyan Shilling'), ('PGK', 'Kina'), ('LAK', 'Kip'), ('KWD', 'Kuwaiti Dinar'), ('AOA', 'Kwanza'), ('MMK', 'Kyat'), ('GEL', 'Lari'), ('LVL', 'Latvian Lats'), ('LBP', 'Lebanese Pound'), ('ALL', 'Lek'), ('HNL', 'Lempira'), ('SLL', 'Leone'), ('LSL', 'Lesotho loti'), ('LRD', 'Liberian Dollar'), ('LYD', 'Libyan Dinar'), ('SZL', 'Lilangeni'), ('LTL', 'Lithuanian Litas'), ('MGA', 'Malagasy Ariary'), ('MWK', 'Malawian Kwacha'), ('MYR', 'Malaysian Ringgit'), ('TMM', 'Manat'), ('MUR', 'Mauritius Rupee'), ('MZN', 'Metical'), ('MXV', 'Mexican Unidad de Inversion (UDI)'), ('MXN', 'Mexican peso'), ('MDL', 'Moldovan Leu'), ('MAD', 'Moroccan Dirham'), ('BOV', 'Mvdol'), ('NGN', 'Naira'), ('ERN', 'Nakfa'), ('NAD', 'Namibian Dollar'), ('NPR', 'Nepalese Rupee'), ('ANG', 'Netherlands Antillian Guilder'), ('ILS', 'New Israeli Sheqel'), ('RON', 'New Leu'), ('TWD', 'New Taiwan Dollar'), ('NZD', 'New Zealand Dollar'), ('KPW', 'North Korean Won'), ('NOK', 'Norwegian Krone'), ('PEN', 'Nuevo Sol'), ('MRO', 'Ouguiya'), ('TOP', 'Paanga'), ('PKR', 'Pakistan Rupee'), ('XPD', 'Palladium'), ('MOP', 'Pataca'), ('PHP', 'Philippine Peso'), ('XPT', 'Platinum'), ('GBP', 'Pound Sterling'), ('BWP', 'Pula'), ('QAR', 'Qatari Rial'), ('GTQ', 'Quetzal'), ('ZAR', 'Rand'), ('OMR', 'Rial Omani'), 
('KHR', 'Riel'), ('MVR', 'Rufiyaa'), ('IDR', 'Rupiah'), ('RUB', 'Russian Ruble'), ('RWF', 'Rwanda Franc'), ('XDR', 'SDR'), ('SHP', 'Saint Helena Pound'), ('SAR', 'Saudi Riyal'), ('RSD', 'Serbian Dinar'), ('SCR', 'Seychelles Rupee'), ('XAG', 'Silver'), ('SGD', 'Singapore Dollar'), ('SBD', 'Solomon Islands Dollar'), ('KGS', 'Som'), ('SOS', 'Somali Shilling'), ('TJS', 'Somoni'), ('SSP', 'South Sudanese Pound'), ('LKR', 'Sri Lanka Rupee'), ('XSU', 'Sucre'), ('SDG', 'Sudanese Pound'), ('SRD', 'Surinam Dollar'), ('SEK', 'Swedish Krona'), ('CHF', 'Swiss Franc'), ('SYP', 'Syrian Pound'), ('BDT', 'Taka'), ('WST', 'Tala'), ('TZS', 'Tanzanian Shilling'), ('KZT', 'Tenge'), ('XXX', 'The codes assigned for transactions where no currency is involved'), ('TTD', 'Trinidad and Tobago Dollar'), ('MNT', 'Tugrik'), ('TND', 'Tunisian Dinar'), ('TRY', 'Turkish Lira'), ('TMT', 'Turkmenistan New Manat'), ('TVD', 'Tuvalu dollar'), ('AED', 'UAE Dirham'), ('XFU', 'UIC-Franc'), ('USD', 'US Dollar'), ('USN', 'US Dollar (Next day)'), ('UGX', 'Uganda Shilling'), ('CLF', 'Unidad de Fomento'), ('COU', 'Unidad de Valor Real'), ('UYI', 'Uruguay Peso en Unidades Indexadas (URUIURUI)'), ('UYU', 'Uruguayan peso'), ('UZS', 'Uzbekistan Sum'), ('VUV', 'Vatu'), ('CHE', 'WIR Euro'), ('CHW', 'WIR Franc'), ('KRW', 'Won'), ('YER', 'Yemeni Rial'), ('JPY', 'Yen'), ('CNY', 'Yuan Renminbi'), ('ZMK', 'Zambian Kwacha'), ('ZMW', 'Zambian Kwacha'), ('ZWD', 'Zimbabwe Dollar A/06'), ('ZWN', 'Zimbabwe dollar A/08'), ('ZWL', 'Zimbabwe dollar A/09'), ('PLN', 'Zloty')], default='CAD', editable=False, max_length=3)),
('purchase_fee', djmoney.models.fields.MoneyField(blank=True, decimal_places=2, default=Decimal('0'), default_currency='CAD', help_text='The purchase fee that the student will be charged to enroll in this course.', max_digits=10, verbose_name='Purchase Fee')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('last_modified_at', models.DateTimeField(auto_now=True, db_index=True)),
],
options={
'verbose_name': 'Course',
'verbose_name_plural': 'Courses',
'db_table': 'at_applications',
},
),
]
| [
"[email protected]"
]
| |
29a331d74f6806dca2a533c596b4dc2abd4096e1 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_14345.py | d79cee8f8289303a6c465582266bd21f8614a8c8 | []
| no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | # convert hex ascii mixed string like r'\x74op' to 'top' in python
s.decode('string_escape')
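# Note: 'string_escape' is Python 2 only; a Python 3 sketch of the same idea
# (assuming s is a str) would be codecs.decode(s, 'unicode_escape').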
| [
"[email protected]"
]
| |
e3c3e76cd3f6345219ed73d91c75b8ea32a227b5 | eab1756b01717e81537133400f36aea4d7a0876f | /dawn/launch-tong.py | cc90b2066a548a7ed4ba16879b0631e9ccd5a8e5 | []
| no_license | bearpelican/cluster | d677fe392ac1196b77e3f8fb79e530ec8371080f | 2e316cf1def0b72b47f79a864ed3aa778c297b95 | refs/heads/master | 2020-03-21T06:52:57.514901 | 2018-08-10T10:20:26 | 2018-08-10T22:33:05 | 138,246,892 | 3 | 1 | null | 2018-06-22T02:51:07 | 2018-06-22T02:51:07 | null | UTF-8 | Python | false | false | 2,593 | py | #!/usr/bin/env python
# numpy01 image, see environment-numpy.org for construction
# (DL AMI v 3.0 based)
#
# us-east-1 AMIs
# numpy00: ami-f9d6dc83
# numpy01: ami-5b524f21
from collections import OrderedDict
import argparse
import os
import sys
import time
import boto3
module_path=os.path.dirname(os.path.abspath(__file__))
sys.path.append(module_path+'/..')
import util
util.install_pdb_handler()
parser = argparse.ArgumentParser(description='launch')
parser.add_argument('--ami', type=str, default='ami-5b524f21',
help="name of AMI to use ")
parser.add_argument('--group', type=str, default='dawn_runs',
help="name of the current run")
parser.add_argument('--name', type=str, default='baseline5-tong',
help="name of the current run")
parser.add_argument('--instance-type', type=str, default='p3.16xlarge',
help="type of instance")
parser.add_argument('--zone', type=str, default='us-east-1f',
help='which availability zone to use')
parser.add_argument('--linux-type', type=str, default='ubuntu',
help='which linux to use: ubuntu or amazon')
parser.add_argument('--role', type=str, default='launcher',
help='launcher or worker')
args = parser.parse_args()
def main():
import aws_backend
run = aws_backend.make_run(args.name, ami=args.ami,
availability_zone=args.zone,
linux_type=args.linux_type)
job = run.make_job('main', instance_type=args.instance_type)
job.wait_until_ready()
print(job.connect_instructions)
  # if tensorboard is running, kill it; otherwise it keeps the efs logdir from
  # being deleted
job.run("tmux kill-session -t tb || echo ok")
logdir = '/efs/runs/%s/%s'%(args.group, args.name)
job.run('rm -Rf %s || echo failed' % (logdir,)) # delete prev logs
# Launch tensorboard visualizer in separate tmux session
job.run("tmux new-session -s tb -n 0 -d")
job.run("tmux send-keys -t tb:0 'source activate mxnet_p36' Enter")
job.run("tmux send-keys -t tb:0 'tensorboard --logdir %s' Enter"%(logdir,))
job.run('source activate mxnet_p36')
job.run('killall python || echo failed') # kill previous run
job.run('pip install -U https://s3.amazonaws.com/inferno-dlami/tensorflow/p3/tensorflow-1.5.0-cp36-cp36m-linux_x86_64.whl')
job.upload('imagenet_utils.py')
job.upload('resnet_model.py')
job.upload('resnet.b512.baseline.py')
job.run_async('python resnet.b512.baseline.py --logdir=%s'%(logdir,))
if __name__=='__main__':
main()
| [
"[email protected]"
]
| |
25e16b899e4063fcda6e3fafd0bc309ec46ee237 | 58e09fac582a76428819e167e42e60765d11bb11 | /space/lib/python3.7/encodings/euc_jp.py | f2043f483c14dcf9d4db30740df0f2159cfe1ea2 | []
| no_license | shanthimadugundi/DB_Project | 25eb2a0e7504f81484ad11c0fa9e902b038c85b4 | b5ba55af1bcddde164cecc60d331d615dd477165 | refs/heads/master | 2020-04-27T05:14:56.107466 | 2019-03-06T05:31:23 | 2019-03-06T05:31:23 | 174,075,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | /Users/shanthimadugundi/anaconda3/lib/python3.7/encodings/euc_jp.py | [
"[email protected]"
]
| |
d6ec1defab5ed57216ed8a7c1927d4b569d4f5e7 | f8af2d190600221b7a597ef4de8ee15137e01266 | /django_mysite/polls/serializers.py | eef85178a606057b3aaaf04ed47a05c101d57c8e | []
| no_license | rifqirosyidi/REST-Framework-Searching | 3b4d64ca1d2217a48f1ec1c6591e1b7e1a42797d | 25481026728edfd564bb6ba18c8ce73040e07543 | refs/heads/master | 2023-04-26T02:11:43.684540 | 2021-04-12T09:43:09 | 2021-04-12T09:43:09 | 206,774,068 | 1 | 0 | null | 2023-04-21T20:36:46 | 2019-09-06T10:49:42 | Python | UTF-8 | Python | false | false | 202 | py | from rest_framework import serializers
from .models import Question, Choice
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = '__all__'
| [
"[email protected]"
]
| |
8d308bb5fcc1a686835c15b6f0c7d4dabfde7c44 | f9b7930e6f43eca26abf87b39961fc2d022db54a | /Python/medium/338. Counting Bits.py | 01ee506d021c0422aa75949e9d17355471bf95da | []
| no_license | LRenascence/LeetCode | 639452dd3bf65a14d0056c01e203a7082fbdc326 | 1a0e1d1503e0a7bff6917491a964a08c572827fb | refs/heads/master | 2021-05-12T03:41:35.346377 | 2021-01-07T23:39:14 | 2021-01-07T23:39:14 | 117,622,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | """
Given a non negative integer number num. For every numbers i in the range 0 ≤ i ≤ num calculate the number of 1's in their binary representation and return them as an array.
Example 1:
Input: 2
Output: [0,1,1]
Example 2:
Input: 5
Output: [0,1,1,2,1,2]
Follow up:
It is very easy to come up with a solution with run time O(n*sizeof(integer)). But can you do it in linear time O(n) /possibly in a single pass?
Space complexity should be O(n).
Can you do it like a boss? Do it without using any builtin function like __builtin_popcount in c++ or in any other language.
"""
class Solution:
def countBits(self, num: int) -> List[int]:
result = [0] * (num + 1)
for i in range(num + 1):
result[i] = result[i >> 1] + (i & 1)
return result | [
"[email protected]"
]
| |
3ef43777b05972b64a9d10046115d44bce3e8128 | 0c672b0b8431064617831d16bf0982d5d3ce6c27 | /utils/proxy_api.py | bf5056d222433e6c27a71950ba9f9d043be6d898 | []
| no_license | buxuele/amazon_books | 617327376044ffd4e760fdc1a71962119717cfe8 | 691bd3e48bd1730dbc4a4a855e84e0b1c3e9c2ec | refs/heads/master | 2023-03-09T23:18:14.730828 | 2021-03-01T10:53:47 | 2021-03-01T10:53:47 | 342,610,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,017 | py | import time
import requests
from utils.my_timer import timer
from utils.get_user_agent import get_a_ua
from utils.mongoDB import Mongo
import config  # config at the project root defines the database names.
from pprint import pprint
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
class SmallProxy:
def __init__(self, china=True):
self.country = china
self.m = Mongo(config.proxy_db, config.proxy_coll)
self.url = "https://ip.jiangxianli.com/api/proxy_ips"
self.headers = {'User-Agent': get_a_ua()}
self.real_ip = self.find_myself()
@staticmethod
def find_myself():
target = 'http://httpbin.org/ip'
resp = requests.get(target)
return resp.json()["origin"]
    # Fetch more proxies by building the query payloads. (Quite happy with how this part turned out.)
def make_payloads(self):
nations = ["俄罗斯", "美国", "加拿大", "日本", "德国", "香港", "印度尼西亚", "法国"]
if self.country:
pay = [{"page": c, "country": "中国", "order_by": "speed"} for c in range(1, 5)]
else:
pay = [{"page": 1, "country": b, "order_by": "speed"} for b in nations]
return pay
def greet(self, pay):
resp = requests.get(self.url, params=pay, headers=self.headers)
if resp.status_code == 200:
return resp.json()
else:
print(f"Sorry! 这个代理网站有问题!")
return None
@timer
def get_all_proxy(self):
temp = []
for k in self.make_payloads():
d = self.greet(k) # d <dict>
if d:
all_data = d["data"]["data"]
for t in all_data:
                    # if t["anonymity"] == 2:  # optionally exclude proxies by anonymity level
a = t["protocol"] + "://" + t["ip"] + ":" + t["port"]
temp.append(a)
print(temp)
print(len(temp))
return temp
def speed_status(self, proxy=None):
url = "http://httpbin.org/ip"
resp = requests.get(url, proxies={"http": proxy}, timeout=1)
        # The proxy only counts as valid when the origin it reports differs from our own real IP.
if resp.status_code == 200 and resp.json()["origin"] != self.real_ip:
print("test ip", proxy)
print("real ip : ", resp.json()["origin"])
self.m.add_to_db({"url": proxy})
@timer
def run(self):
fake_proxy = self.get_all_proxy()
        # Setting this to about 20 workers is just right here; too many actually hurts.
with ThreadPoolExecutor(max_workers=16) as executor:
future_tasks = [executor.submit(self.speed_status, p) for p in fake_proxy]
wait(future_tasks, return_when=ALL_COMPLETED)
def show_product(self):
self.m.get_unique(show=True)
if __name__ == '__main__':
p = SmallProxy(china=True)
# p.main()
p.run()
time.sleep(.1)
p.show_product()
| [
"[email protected]"
]
| |
34322ab0be08ec02c0cf670b8835ce5086251b9a | add5ca4ed6f5a5030cfcd60a09e502390ffc4936 | /full_code/paddle/conf/img_qa_gate2_gen.py | dd3f38a36d621d037da12a1a132552fe9d2eb6ae | []
| no_license | yangyi02/vision_language | 1f0b10e648a1ef0ea88edd30e41581d25969df27 | 9c55e5115d03bab58cf6165f63c9a6f426ed87ce | refs/heads/master | 2020-04-02T19:45:25.051432 | 2018-10-25T22:32:39 | 2018-10-25T22:32:39 | 154,745,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,331 | py | # -*- coding: utf-8 -*-
from math import sqrt
import os
import sys
from trainer.recurrent_units import LstmRecurrentUnit
model_type('recurrent_nn')
# data setting
gen_list = get_config_arg('gen_list', str, './gen.list')
result_file = get_config_arg('result_file', str, './result.txt')
# dictionary setting
dict_file = get_config_arg('dict_file', str, './dict.txt')
dict_pkl = get_config_arg('dict_pkl', str, './dict.pkl')
# image feature setting
img_feat_list = get_config_arg('img_feat_list', str, './img_feat.list')
# feature dimension setting
img_feat_dim = get_config_arg('img_feat_dim', int, 4096)
word_embedding_dim = 512
hidden_dim = 512
multimodal_dim = 1024
dict_dim = len(open(dict_file).readlines())
start_index = dict_dim-2
end_index = dict_dim-1
# hyperparameter setting
Settings(
    batch_size = 8, # this must equal trainer_count
learning_rate = 0,
)
# data provider setting
TestData(
PyData(
files = gen_list,
load_data_module = 'join_test',
load_data_object = 'processData',
load_data_args = ' '.join([dict_pkl, img_feat_list, str(img_feat_dim), '1.0'])
)
)
##### network #####
Inputs('question_id', 'img_feat', 'question')
Outputs('predict_word')
# data layers
DataLayer(name = 'question_id', size = 1)
DataLayer(name = 'img_feat', size = img_feat_dim)
DataLayer(name = 'question', size = dict_dim)
# question embedding input: question_embedding
MixedLayer(name = 'question_embedding',
size = word_embedding_dim,
bias = False,
inputs = TableProjection('question',
parameter_name = 'word_embedding',
),
)
# question hidden input
MixedLayer(name = 'question_input',
size = hidden_dim,
active_type = 'stanh',
inputs = FullMatrixProjection('question_embedding'),
)
# question hidden input: encoder
RecurrentLayerGroupBegin('encoder' + '_layer_group',
in_links = ['question_input'],
out_links = ['encoder'],
seq_reversed = False,
)
LstmRecurrentUnit(name = 'encoder',
size = hidden_dim/4,
active_type = 'relu',
state_active_type = 'linear',
gate_active_type = 'sigmoid',
inputs = [IdentityProjection('question_input')],
)
RecurrentLayerGroupEnd('encoder' + '_layer_group')
# get last of encoder
Layer(name = 'encoder_last',
type = 'seqlastins',
active_type = '',
bias = False,
inputs = [Input('encoder')],
)
# rnn1
RecurrentLayerGroupBegin('rnn1' + '_layer_group',
in_links = [],
out_links = ['predict_word'],
seq_reversed = False,
generator = Generator(
max_num_frames = 20,
beam_size = 5,
num_results_per_sample = 1,
),
)
img_feat_memory = Memory(name = 'img_feat_memory',
size = img_feat_dim,
boot_layer = 'img_feat',
is_sequence = False,
)
MixedLayer(name = 'img_feat_memory',
size = img_feat_dim,
bias = False,
inputs = IdentityProjection(img_feat_memory),
)
question_memory = Memory(name = 'question_memory',
size = hidden_dim/4,
boot_layer = 'encoder_last',
is_sequence = False,
)
MixedLayer(name = 'question_memory',
size = hidden_dim/4,
bias = False,
inputs = IdentityProjection(question_memory),
)
predict_word_memory = Memory(name = 'predict_word',
size = dict_dim,
boot_with_const_id = start_index,
)
MixedLayer(name = 'predict_word_embedding',
size = word_embedding_dim,
bias = False,
inputs = TableProjection(predict_word_memory,
parameter_name = 'word_embedding',
),
)
# hidden1
MixedLayer(name = 'hidden1',
size = hidden_dim,
active_type = 'stanh',
bias = Bias(parameter_name = '_hidden1.wbias'),
inputs = FullMatrixProjection('predict_word_embedding',
parameter_name = '_hidden1.w0'),
)
LstmRecurrentUnit(name = 'rnn1',
size = hidden_dim/4,
active_type = 'relu',
state_active_type = 'linear',
gate_active_type = 'sigmoid',
inputs = [IdentityProjection('hidden1')],
)
# language unit
MixedLayer(name = 'language',
size = multimodal_dim,
active_type = 'linear',
bias = Bias(parameter_name = '_language.wbias'),
inputs = [FullMatrixProjection(question_memory, parameter_name = '_language.w0'),
FullMatrixProjection('predict_word_embedding', parameter_name = '_language.w1'),
FullMatrixProjection('rnn1', parameter_name = '_language.w2'),
],
# drop_rate = 0.5,
)
MixedLayer(name = 'language_gate',
size = 1,
active_type = 'sigmoid',
bias = Bias(parameter_name = 'language_gate.b',
initial_std = 0.0, initial_mean = -2.0),
inputs = FullMatrixProjection('language',
parameter_name = 'language_gate_proj')
)
Layer(name = 'language_gate_expanded',
type = 'featmap_expand',
num_filters = multimodal_dim,
inputs = FullMatrixProjection('language_gate')
)
MixedLayer(name = 'gated_language',
size = multimodal_dim,
bias = False,
inputs = DotMulOperator(['language_gate_expanded', 'language'])
)
# hidden2
MixedLayer(name = 'hidden2',
size = multimodal_dim,
active_type = 'stanh',
bias = Bias(parameter_name = '_hidden2.wbias'),
inputs = [IdentityProjection('gated_language', parameter_name = '_hidden2.w0'),
FullMatrixProjection(img_feat_memory, parameter_name = '_hidden2.w1'),
],
# drop_rate = 0.5,
)
# hidden3
#Layer(
# name = 'hidden3',
# type = 'mixed',
# size = word_embedding_dim,
# active_type = 'stanh',
# inputs = FullMatrixProjection(
# 'hidden2',
# initial_std = sqrt(1. / multimodal_dim)),
#)
# output
Layer(name = 'output',
type = 'fc',
size = dict_dim,
active_type = 'softmax',
bias = Bias(parameter_name = '_output.wbias'),
inputs = [Input('hidden2', parameter_name = '_output.w0')],
#inputs = TransposedFullMatrixProjection(
# 'hidden3',
# parameter_name = 'wordvecs'),
)
Layer(
name = 'predict_word',
type = 'maxid',
inputs = 'output',
)
Layer(
name = 'eos_check',
type = 'eos_id',
eos_id = end_index,
inputs = ['predict_word'],
)
RecurrentLayerGroupEnd('rnn1' + '_layer_group')
# Write question and answer pairs to file
Evaluator(
name = 'caption_printer',
type = 'seq_text_printer',
dict_file = dict_file,
result_file = result_file,
#delimited = False,
inputs = ['question_id', 'question', 'predict_word'],
)
| [
"[email protected]"
]
| |
ad2ce039c61d85c1c0c5640333adb3f2fc42b67e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03369/s319661754.py | ec44e017019af609a02068a512dee343db5acc9e | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | S = list(input())
print(700+100*(S.count('o')))
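# Illustrative check (assuming the usual statement for this task: a base price of 700
# plus 100 for each 'o' topping in the 3-character string S):
#   "oxo" -> 700 + 100*2 = 900
#   "xxx" -> 700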
| [
"[email protected]"
]
| |
eecde9e85f8bbc1b9eda6d9cab643cadd93edcab | d970e32d23e84fe0f6b5ba1694e2958d52fce586 | /sample_scripts/sample_tokenization.py | f165ed859675d95ce1ca9d1aa24545228ddd3e2f | [
"MIT"
]
| permissive | Kensuke-Mitsuzawa/sample-codes-supporters-tutorial | 8e6f1ed794732fa87176333286e65898e321f60f | ae9b544ddd3a782e76a30af257b43f88341ba696 | refs/heads/master | 2023-05-31T22:15:03.313349 | 2018-02-27T02:07:00 | 2018-02-27T02:07:00 | 79,502,186 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,661 | py | from JapaneseTokenizer import MecabWrapper
from typing import List, Tuple, Dict, Union, Any
import json
import logging
import collections
import itertools
logger = logging.getLogger()
logger.setLevel(10)
SLEEP_TIME = 2
"""形態素分割のサンプルコードを示します
Python3.5.1の環境下で動作を確認しています。
"""
__author__ = "Kensuke Mitsuzawa"
__author_email__ = "[email protected]"
__license_name__ = "MIT"
def tokenize_text(input_text:str,
tokenizer_obj:MecabWrapper,
pos_condition:List[Tuple[str,...]])->List[str]:
"""* What you can do
- Run morphological tokenization on a single document
"""
### tokenize() performs morphological analysis -> filter() applies the POS filter -> convert_list_object() converts to List[str]
return tokenizer_obj.tokenize(input_text).filter(pos_condition=pos_condition).convert_list_object()
### To keep the inflected surface forms instead of the dictionary (base) forms, pass the is_surface=True flag
#return tokenizer_obj.tokenize(input_text, is_surface=True).filter(pos_condition=pos_condition).convert_list_object()
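# Illustrative usage (hypothetical input; the exact tokens depend on the installed
# MeCab dictionary, so the output shown here is only an assumption):
#   tokenize_text('スター・ウォーズを見る', mecab_obj, pos_condition)
#   might return something like ['スター・ウォーズ', '見る'] after POS filtering.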
def aggregate_words(seq_tokenized:List[List[str]])->collections.Counter:
"""* What you can do
- Count the frequency of tokens
* Params
- seq_tokenized
>>> [['スター・ウォーズ', 'エピソード4', '新たなる希望', 'スター・ウォーズ', 'エピソード4', 'なる', 'きぼう', 'STAR WARS', 'IV', 'A NEW HOPE', '1977年', 'する', 'アメリカ映画']]
"""
### Flatten the nested list into one dimension; List[List[str]] -> List[str] ###
seq_words = itertools.chain.from_iterable(seq_tokenized)
word_frequency_obj = collections.Counter(seq_words)
return word_frequency_obj
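# Illustrative result for the docstring input above: a collections.Counter mapping each
# token to its count, e.g. Counter({'スター・ウォーズ': 2, 'エピソード4': 2, 'なる': 1, ...}).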
def aggregate_words_by_label():
"""* What you can do
- Aggregate word counts per label (left unimplemented in this sample)
"""
pass
def main(tokenizer_obj:MecabWrapper,
seq_text_data:List[Dict[str,Any]],
pos_condition:List[Tuple[str,...]]):
"""* What you can do
- Call the morphological analyzer
- Aggregate word counts
"""
# --------------------------------------------------------------------------------------------------------------#
# Simple word-frequency aggregation
### Use a Python list comprehension (a comprehension also runs faster than an explicit loop) ###
seq_tokenized_text = [
tokenize_text(input_text=wiki_text_obj['text'],tokenizer_obj=tokenizer_obj, pos_condition=pos_condition)
for wiki_text_obj in seq_text_data
]
### Aggregate the word counts ###
word_frequency_obj = aggregate_words(seq_tokenized_text)
### A Counter object can be converted to a plain dict with the dict() function ###
dict(word_frequency_obj)
### Build [(word, frequency)] pairs so they can be sorted by frequency
seq_word_frequency = [(word, frequency) for word, frequency in dict(word_frequency_obj).items()]
### Sort by word frequency ###
print('Top 100 word frequency without label')
print(sorted(seq_word_frequency, key=lambda x:x[1], reverse=True)[:100])
# --------------------------------------------------------------------------------------------------------------#
# Aggregate words per label
### Run tokenization while keeping the label information ###
seq_tokenized_text = [
(wiki_text_obj['gold_label'], tokenize_text(input_text=wiki_text_obj['text'],tokenizer_obj=tokenizer_obj, pos_condition=pos_condition))
for wiki_text_obj in seq_text_data
]
#### Group the results by label ####
##### Anonymous function that returns the grouping key (the label) #####
key_function= lambda x:x[0]
#### Always call sorted() before groupby: groupby only merges adjacent items (see the illustrative sketch right after main())
g_object = itertools.groupby(sorted(seq_tokenized_text, key=key_function), key=key_function)
### A list comprehension would also work; a plain loop is used here for readability ###
for label_name, element_in_label in g_object:
### element_in_label is a generator object producing the [(label, [word])] structure ###
seq_list_tokens_with_label = list(element_in_label)
seq_list_tokens = [label_tokens[1] for label_tokens in seq_list_tokens_with_label]
word_frequency_obj_label = aggregate_words(seq_list_tokens)
seq_word_frequency_label = [(word, frequency) for word, frequency in dict(word_frequency_obj_label).items()]
print('*'*30)
print('Top 100 words For label = {}'.format(label_name))
print(sorted(seq_word_frequency_label, key=lambda x:x[1], reverse=True)[:100])
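# Minimal sketch (added for illustration, not called anywhere): shows why sorted() must
# precede itertools.groupby, since groupby only merges adjacent items that share a key.
def _groupby_needs_sort_sketch():
    data = [('pos', 'w1'), ('neg', 'w2'), ('pos', 'w3')]
    key_function = lambda x: x[0]
    without_sort = [(label, len(list(group)))
                    for label, group in itertools.groupby(data, key=key_function)]
    # -> [('pos', 1), ('neg', 1), ('pos', 1)]: 'pos' appears twice because its items are not adjacent
    with_sort = [(label, len(list(group)))
                 for label, group in itertools.groupby(sorted(data, key=key_function), key=key_function)]
    # -> [('neg', 1), ('pos', 2)]
    return without_sort, with_sort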
if __name__ == '__main__':
### Create a MecabWrapper ###
mecab_obj = MecabWrapper(dictType='ipadic')
### Define only the POS tags we want to extract ###
pos_condition = [('名詞', '固有名詞'), ('動詞', '自立'), ('形容詞', '自立')]
### Load the Wikipedia summary data ###
print('=' * 50)
path_wikipedia_summary_json = './wikipedia_data/wikipedia-summary.json'
with open(path_wikipedia_summary_json, 'r') as f:
seq_wiki_summary_text = json.load(f)
main(tokenizer_obj=mecab_obj,
pos_condition=pos_condition,
seq_text_data=seq_wiki_summary_text)
### Load the full Wikipedia data ###
print('=' * 50)
path_wikipedia_full_json = './wikipedia_data/wikipedia-full.json'
with open(path_wikipedia_full_json, 'r') as f:
seq_wiki_full_text = json.load(f)
main(tokenizer_obj=mecab_obj,
pos_condition=pos_condition,
seq_text_data=seq_wiki_full_text) | [
"[email protected]"
]
| |
691a09c696e5d06361215ef05998a05a23437589 | 6d1380a38aeb89df5db2f742ca0665f877a01133 | /extract.py | 294ccc36295e2534490615af52969899c62233dc | []
| no_license | marijnkoolen/constitution-reference-parser | 937ddbfdb56a1cba78093c7568e311ca6790f4f4 | 4083461abb4dd4cc8639625f9305b580eb69ec04 | refs/heads/master | 2021-01-02T09:27:17.951140 | 2015-09-29T12:47:49 | 2015-09-29T12:47:49 | 40,536,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,420 | py | import re
import patterns
from document import ReferenceList, Reference
def extract_refs(document, sentence):
sentenceDone = 0
# returns a dictionary of document specific patterns
pattern = patterns.makeRefPatterns(document.RefUnits())
refList = ReferenceList(sentence, pattern)
while not sentenceDone:
# Start of a reference
matchStart = re.search(refList.pattern['refStart'], refList.sentence)
if matchStart:
extract_start_ref(matchStart, refList, document)
while pattern['refDummy'] in refList.sentence:
extract_sequence_refs(refList, document)
else:
# assumption: there is no reference in this sentence
# action: signal extraction is done
refList.FinishCurrent()
sentenceDone = 1
# check if this is a complex reference sequence
return refList
def extract_start_ref(matchStart, refList, document):
refList.sentence = re.sub(matchStart.group(0), refList.pattern['refDummy'], refList.sentence, 1)
refType, num1, rangeSymbol, num2 = matchStart.groups()
refType = refType.lower()
refNums = makeRange(num1, rangeSymbol, num2)
if refType in document.SkipUnits:
refList.sentence = re.sub(refList.pattern['refDummy'], "", refList.sentence, 1)
return 0
addToRefList(refType, refNums, refList)
refList.UpdatePrev(refType)
return 0
def extract_sequence_refs(refList, document):
refNums = []
refType = None
sep, conj, part, refType, refNums = findSequenceType(refList, document)
if refNums == []:
# assumption: if there is no next pattern, the sequence is done
# action: remove the reference dummy
refList.sentence = re.sub(refList.pattern['refDummy'], "", refList.sentence, 1)
refList.FinishCurrent()
refList.UpdatePrev('')
return 0
elif refType:
refType = refType.lower()
# if found type is too deep in hierarchy, ignore it
# e.g. we don't consider paragraphs and sentences as part of the reference
if refType in document.SkipUnits:
refList.UpdatePrev(refType)
return 0
elif refType == None:
# if previous type is too deep in hierarchy, ignore it
# e.g. we don't consider paragraphs and sentences as part of the reference
if refList.prevUnit in document.SkipUnits:
refNums = []
if sep:
parse_separator_ref(refType, refNums, refList, document)
elif conj:
parse_conjunction_ref(refType, refNums, refList, document)
elif part:
parse_part_of_ref(refType, refNums, refList)
if refType != None:
refList.UpdatePrev(refType)
def findSequenceType(refList, document):
mSepConjNumber = re.search(refList.pattern['refSepConjNumber'], refList.sentence)
mSepConjPartTypeNumber = re.search(refList.pattern['refSepConjPartTypeNumber'], refList.sentence)
sep = None
conj = None
part = None
refType = None
refNums = []
if mSepConjNumber:
refList.sentence = re.sub(mSepConjNumber.group(0), refList.pattern['refDummy'], refList.sentence, 1)
sep, conj, num1, rangeSymbol, num2 = mSepConjNumber.groups()
refNums = makeRange(num1, rangeSymbol, num2)
elif mSepConjPartTypeNumber:
refList.sentence = re.sub(mSepConjPartTypeNumber.group(0), refList.pattern['refDummy'], refList.sentence, 1)
sep, conj, part, refType, num1, rangeSymbol, num2 = mSepConjPartTypeNumber.groups()
refNums = makeRange(num1, rangeSymbol, num2)
return (sep, conj, part, refType, refNums)
def parse_separator_ref(refType, refNums, refList, document):
# 1. ref sep number -> new ref of same type
# assumption: type of new ref is implicit
# action: add refs similar to previous type
if refType == None:
addToRefList(None, refNums, refList)
# 2. ref sep type number -> new ref of same type
# assumption: type of new ref is explicit and of same type
elif refType == refList.prevUnit:
addToRefList(None, refNums, refList)
# 3. ref sep type number -> specification of existing ref
# assumption: hierarchical relations are written from high to low
# action: replace previous reference with hierarchical reference
elif refType in document.ContainedBy and refList.prevUnit in document.ContainedBy[refType]:
prevRef = refList.Last()
refList.RemoveLast()
for refNum in refNums:
reference = Reference()
reference.CopyFrom(prevRef)
reference.AddPart(refType, refNum)
refList.AddCurrent(reference)
# 4. ref sep type number -> new ref of different type
# assumption: previous ref was hierarchical, new ref is higher in hierarchy
# action: add refType as new reference
else:
addToRefList(refType, refNums, refList)
def parse_conjunction_ref(refType, refNums, refList, document):
# ref conj number -> ref
# assumptions:
# 1. no mention of type suggests these are
# references of the same type as the
# previous reference
if refType == None:
addToRefList(None, refNums, refList)
# ref conj type number -> ref
# previous reference has same type and higher
# level type
# assumptions:
# 2. explicit mention of type suggest this is a
# separate reference, but share higher level
# type
elif refType == refList.prevUnit:
prevRef = refList.Last()
for container in document.ContainedBy[refType]:
if container in prevRef.TargetParts:
for refNum in refNums:
reference = Reference()
reference.CopyFrom(prevRef)
reference.AddPart(refType, refNum)
refList.AddCurrent(reference)
break
# ref conj type number -> ref
# assumptions:
# 3. explicit mention of type suggests these are
# separate references
else:
addToRefList(refType, refNums, refList)
def parse_part_of_ref(refType, refNums, refList):
# ref part type number -> ref
# assumptions:
# 1. part of signals end of sequence
# 2. new type is container of all refs in sequence
for refNum in refNums:
for reference in refList.current:
reference.AddPart(refType, refNum)
refList.prevUnit = ''
refList.FinishCurrent()
# remove dummy reference
refList.sentence = re.sub(refList.pattern['refDummy'], "", refList.sentence, 1)
def addToRefList(refType, refNums, refList):
#print "DEBUG: addToRefList"
for refNum in refNums:
reference = Reference()
#print "adding reference of type {0} with number {1}".format(refType, refNum)
if refType == None:
reference.CopyFrom(refList.Last())
refType = refList.prevUnit
reference.AddPart(refType, refNum)
refList.AddCurrent(reference)
def makeRange(num1, rangeSymbol, num2):
if rangeSymbol and num2:
if int(num2) < int(num1):
return [num1]
return [unicode(num) for num in range(int(num1), int(num2)+1)]
return [num1]
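# Illustrative behaviour:
#   makeRange('3', '-', '5') -> [u'3', u'4', u'5']
#   makeRange('7', None, None) -> ['7']
#   makeRange('5', '-', '3') -> ['5'] (a descending range falls back to the first number only)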
| [
"[email protected]"
]
| |
e097a16f0379513c2092c5425fad847644f49308 | 091e97bcfe5acc0635bd601aa8497e377b74d41a | /openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_serviceaccount_secret.py | 4670e3e95ddc2ebdd06ba5c38649e792aac37e53 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
]
| permissive | openshift/openshift-tools | d59b63778f25cb8fb3c7a0253afe22a173e72f9d | e342f6659a4ef1a188ff403e2fc6b06ac6d119c7 | refs/heads/prod | 2023-08-30T01:52:04.108978 | 2022-03-23T21:07:28 | 2022-03-23T21:07:28 | 36,827,699 | 170 | 254 | Apache-2.0 | 2022-06-16T12:11:51 | 2015-06-03T20:09:22 | Python | UTF-8 | Python | false | false | 60,388 | py | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import time
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/serviceaccount_secret -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_serviceaccount_secret
short_description: Module to manage openshift service account secrets
description:
- Manage openshift service account secrets programmatically.
options:
state:
description:
- If present, the service account will be linked with the secret if it is not already. If absent, the service account will be unlinked from the secret if it is already linked. If list, information about the service account secrets will be gathered and returned as part of the Ansible call results.
required: false
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: false
aliases: []
service_account:
description:
- Name of the service account.
required: true
default: None
aliases: []
namespace:
description:
- Namespace of the service account and secret.
required: true
default: None
aliases: []
secret:
description:
- The secret that should be linked to the service account.
required: false
default: None
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: get secrets of a service account
oc_serviceaccount_secret:
state: list
service_account: builder
namespace: default
register: sasecretout
- name: Link a service account to a specific secret
oc_serviceaccount_secret:
service_account: builder
secret: mynewsecret
namespace: default
register: sasecretout
'''
# -*- -*- -*- End included fragment: doc/serviceaccount_secret -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup_ext=None,
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
if backup_ext is None:
self.backup_ext = ".{}".format(time.strftime("%Y%m%dT%H%M%S"))
else:
self.backup_ext = backup_ext
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
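# Illustrative example (default '.' separator): parse_key('a.b[1]') returns
# [('', 'a'), ('', 'b'), ('1', '')], i.e. (list_index, dict_key) tuples with the
# unused slot left empty.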
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def remove_entry(data, key, index=None, value=None, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
if value is not None:
data.pop(value)
elif index is not None:
raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
else:
data.clear()
return True
elif key == '' and isinstance(data, list):
ind = None
if value is not None:
try:
ind = data.index(value)
except ValueError:
return False
elif index is not None:
ind = index
else:
del data[:]
if ind is not None:
data.pop(ind)
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Add an entry to a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b, item = 'd'
result: d becomes {'a': {'b': 'd'}}
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
yfd.write(contents)
fcntl.flock(yfd, fcntl.LOCK_UN)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, '{}{}'.format(self.filename, self.backup_ext))
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
if self.content_type == 'yaml':
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
elif self.content_type == 'json':
Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
else:
raise YeditException('Unsupported content_type: {}.'.format(self.content_type) +
'Please specify a content_type of yaml or json.')
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path, index=None, value=None):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.safe_load(str(invalue))
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
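# Illustrative edits list (hypothetical keys/values): each entry is a dict such as
#   {'key': 'spec.replicas', 'value': 3, 'action': 'update'}
#   {'key': 'metadata.labels.app', 'value': 'web'}            # default action: put
#   {'key': 'spec.args', 'value': '--debug', 'action': 'append'}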
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
content_type=params['content_type'],
backup_ext=params['backup_ext'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it is has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'], params['index'], params['value'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
# we enapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
updated = False
if content is not None:
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
updated = True
elif edits is not None:
results = Yedit.process_edits(edits, yed)
if results['changed']:
updated = True
if updated:
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-p')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None, field_selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
if field_selector is not None:
cmd.append('--field-selector={}'.format(field_selector))
# Name cannot be used with selector or field_selector.
if selector is None and field_selector is None and name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node scheduable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
version = version[1:] # Remove the 'v' prefix
versions_dict[tech + '_numeric'] = version.split('+')[0]
# "3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.'))
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
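# Illustrative example (hypothetical option name): with
#   config_options == {'service_account': {'value': 'builder', 'include': True}}
# stringify() returns ['--service-account=builder'] (underscores become dashes).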
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-
class ServiceAccountConfig(object):
'''Service account config class
This class stores the options and returns a default service account
'''
# pylint: disable=too-many-arguments
def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
self.name = sname
self.kubeconfig = kubeconfig
self.namespace = namespace
self.secrets = secrets or []
self.image_pull_secrets = image_pull_secrets or []
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiate a properly structured volume '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'ServiceAccount'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['secrets'] = []
if self.secrets:
for sec in self.secrets:
self.data['secrets'].append({"name": sec})
self.data['imagePullSecrets'] = []
if self.image_pull_secrets:
for sec in self.image_pull_secrets:
self.data['imagePullSecrets'].append({"name": sec})
class ServiceAccount(Yedit):
''' Class to wrap the oc command line tools '''
image_pull_secrets_path = "imagePullSecrets"
secrets_path = "secrets"
def __init__(self, content):
'''ServiceAccount constructor'''
super(ServiceAccount, self).__init__(content=content)
self._secrets = None
self._image_pull_secrets = None
@property
def image_pull_secrets(self):
''' property for image_pull_secrets '''
if self._image_pull_secrets is None:
self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, secrets):
''' property for secrets '''
self._image_pull_secrets = secrets
@property
def secrets(self):
''' property for secrets '''
if not self._secrets:
self._secrets = self.get(ServiceAccount.secrets_path) or []
return self._secrets
@secrets.setter
def secrets(self, secrets):
''' property for secrets '''
self._secrets = secrets
def delete_secret(self, inc_secret):
''' remove a secret '''
remove_idx = None
for idx, sec in enumerate(self.secrets):
if sec['name'] == inc_secret:
remove_idx = idx
break
if remove_idx:
del self.secrets[remove_idx]
return True
return False
def delete_image_pull_secret(self, inc_secret):
''' remove a image_pull_secret '''
remove_idx = None
for idx, sec in enumerate(self.image_pull_secrets):
if sec['name'] == inc_secret:
remove_idx = idx
break
if remove_idx:
del self.image_pull_secrets[remove_idx]
return True
return False
def find_secret(self, inc_secret):
'''find secret'''
for secret in self.secrets:
if secret['name'] == inc_secret:
return secret
return None
def find_image_pull_secret(self, inc_secret):
'''find image pull secret'''
for secret in self.image_pull_secrets:
if secret['name'] == inc_secret:
return secret
return None
def add_secret(self, inc_secret):
'''add secret'''
if self.secrets:
self.secrets.append({"name": inc_secret}) # pylint: disable=no-member
else:
self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])
def add_image_pull_secret(self, inc_secret):
'''add image_pull_secret'''
if self.image_pull_secrets:
self.image_pull_secrets.append({"name": inc_secret}) # pylint: disable=no-member
else:
self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_serviceaccount_secret.py -*- -*- -*-
class OCServiceAccountSecret(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'sa'
def __init__(self, config, verbose=False):
''' Constructor for OpenshiftOC '''
super(OCServiceAccountSecret, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
self.verbose = verbose
self._service_account = None
@property
def service_account(self):
''' Property for the service account '''
if not self._service_account:
self.get()
return self._service_account
@service_account.setter
def service_account(self, data):
''' setter for the service account '''
self._service_account = data
def exists(self, in_secret):
''' verifies if secret exists in the service account '''
result = self.service_account.find_secret(in_secret)
if not result:
return False
return True
def get(self):
''' get the service account definition from the master '''
sao = self._get(OCServiceAccountSecret.kind, self.config.name)
if sao['returncode'] == 0:
self.service_account = ServiceAccount(content=sao['results'][0])
sao['results'] = self.service_account.get('secrets')
return sao
def delete(self):
''' delete secrets '''
modified = []
for rem_secret in self.config.secrets:
modified.append(self.service_account.delete_secret(rem_secret))
if any(modified):
return self._replace_content(OCServiceAccountSecret.kind, self.config.name, self.service_account.yaml_dict)
return {'returncode': 0, 'changed': False}
def put(self):
''' place secrets into sa '''
modified = False
for add_secret in self.config.secrets:
if not self.service_account.find_secret(add_secret):
self.service_account.add_secret(add_secret)
modified = True
if modified:
return self._replace_content(OCServiceAccountSecret.kind, self.config.name, self.service_account.yaml_dict)
return {'returncode': 0, 'changed': False}
@staticmethod
# pylint: disable=too-many-return-statements,too-many-branches
# TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode):
''' run the oc_serviceaccount_secret module'''
sconfig = ServiceAccountConfig(params['service_account'],
params['namespace'],
params['kubeconfig'],
[params['secret']],
None)
oc_sa_sec = OCServiceAccountSecret(sconfig, verbose=params['debug'])
state = params['state']
api_rval = oc_sa_sec.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': "list"}
########
# Delete
########
if state == 'absent':
if oc_sa_sec.exists(params['secret']):
if check_mode:
return {'changed': True, 'msg': 'Would have removed the ' + \
'secret from the service account.'}
api_rval = oc_sa_sec.delete()
return {'changed': True, 'results': api_rval, 'state': "absent"}
return {'changed': False, 'state': "absent"}
if state == 'present':
########
# Create
########
if not oc_sa_sec.exists(params['secret']):
if check_mode:
return {'changed': True, 'msg': 'Would have added the ' + \
'secret to the service account.'}
# Create it here
api_rval = oc_sa_sec.put()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_sa_sec.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "present"}
return {'changed': False, 'results': api_rval, 'state': "present"}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
# -*- -*- -*- End included fragment: class/oc_serviceaccount_secret.py -*- -*- -*-
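# Illustrative sketch (hypothetical values; assumes a reachable cluster and the oc
# client, since the class shells out to `oc get`): the wrapper can be driven
# directly with the same params dict that the Ansible module below builds from its
# argument spec. check_mode=True reports what would change without writing anything.
#
#   result = OCServiceAccountSecret.run_ansible(
#       {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
#        'state': 'present',
#        'debug': False,
#        'namespace': 'default',
#        'secret': 'builder-dockercfg-xyz34',
#        'service_account': 'builder'},
#       check_mode=True)
#   result['changed']   # True when the secret is not already on the service account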
# -*- -*- -*- Begin included fragment: ansible/oc_serviceaccount_secret.py -*- -*- -*-
def main():
'''
ansible oc module to manage service account secrets.
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default=None, required=True, type='str'),
secret=dict(default=None, type='str'),
service_account=dict(required=True, type='str'),
),
supports_check_mode=True,
)
rval = OCServiceAccountSecret.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_serviceaccount_secret.py -*- -*- -*-
| [
"[email protected]"
]
| |
fd7cdd39e9a8db86129719f700f436d19b4bc19f | 1b36425f798f484eda964b10a5ad72b37b4da916 | /posthog/models/event/event.py | 2e6d0625403431f36a01778187c27ed6f634ddce | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | dorucioclea/posthog | 0408baa2a7ae98e5bea352c516f741ddc17c0a3e | 8848981baf237117fb22d28af0770a0165881423 | refs/heads/master | 2023-01-23T11:01:57.942146 | 2023-01-13T09:03:00 | 2023-01-13T09:03:00 | 241,222,000 | 0 | 0 | MIT | 2020-02-17T22:34:37 | 2020-02-17T22:34:36 | null | UTF-8 | Python | false | false | 5,830 | py | import copy
import datetime
import re
from collections import defaultdict
from typing import Dict, List, Optional, Union
from dateutil.relativedelta import relativedelta
from django.db import models
from django.utils import timezone
from posthog.models.team import Team
SELECTOR_ATTRIBUTE_REGEX = r"([a-zA-Z]*)\[(.*)=[\'|\"](.*)[\'|\"]\]"
LAST_UPDATED_TEAM_ACTION: Dict[int, datetime.datetime] = {}
TEAM_EVENT_ACTION_QUERY_CACHE: Dict[int, Dict[str, tuple]] = defaultdict(dict)
# TEAM_EVENT_ACTION_QUERY_CACHE looks like team_id -> event ex('$pageview') -> query
TEAM_ACTION_QUERY_CACHE: Dict[int, str] = {}
DEFAULT_EARLIEST_TIME_DELTA = relativedelta(weeks=1)
class SelectorPart:
direct_descendant = False
unique_order = 0
def __init__(self, tag: str, direct_descendant: bool, escape_slashes: bool):
self.direct_descendant = direct_descendant
self.data: Dict[str, Union[str, List]] = {}
self.ch_attributes: Dict[str, Union[str, List]] = {} # attributes for CH
result = re.search(SELECTOR_ATTRIBUTE_REGEX, tag)
if result and "[id=" in tag:
self.data["attr_id"] = result[3]
self.ch_attributes["attr_id"] = result[3]
tag = result[1]
if result and "[" in tag:
self.data[f"attributes__attr__{result[2]}"] = result[3]
self.ch_attributes[result[2]] = result[3]
tag = result[1]
if "nth-child(" in tag:
parts = tag.split(":nth-child(")
self.data["nth_child"] = parts[1].replace(")", "")
self.ch_attributes["nth-child"] = self.data["nth_child"]
tag = parts[0]
if "." in tag:
parts = tag.split(".")
# Strip all slashes that are not followed by another slash
self.data["attr_class__contains"] = [self._unescape_class(p) if escape_slashes else p for p in parts[1:]]
tag = parts[0]
if tag:
self.data["tag_name"] = tag
@property
def extra_query(self) -> Dict[str, List[Union[str, List[str]]]]:
where: List[Union[str, List[str]]] = []
params: List[Union[str, List[str]]] = []
for key, value in self.data.items():
if "attr__" in key:
where.append(f"(attributes ->> 'attr__{key.split('attr__')[1]}') = %s")
else:
if "__contains" in key:
where.append(f"{key.replace('__contains', '')} @> %s::varchar(200)[]")
else:
where.append(f"{key} = %s")
params.append(value)
return {"where": where, "params": params}
def _unescape_class(self, class_name):
r"""Separate all double slashes "\\" (replace them with "\") and remove all single slashes between them."""
return "\\".join([p.replace("\\", "") for p in class_name.split("\\\\")])
class Selector:
parts: List[SelectorPart] = []
def __init__(self, selector: str, escape_slashes=True):
self.parts = []
# Sometimes people manually add *, just remove them as they don't do anything
selector = selector.replace("> * > ", "").replace("> *", "").strip()
tags = list(self._split(selector))
tags.reverse()
# Detecting selector parts
for index, tag in enumerate(tags):
if tag == ">" or tag == "":
continue
direct_descendant = index > 0 and tags[index - 1] == ">"
part = SelectorPart(tag, direct_descendant, escape_slashes)
part.unique_order = len([p for p in self.parts if p.data == part.data])
self.parts.append(copy.deepcopy(part))
def _split(self, selector):
in_attribute_selector = False
in_quotes: Optional[str] = None
part: List[str] = []
for char in selector:
if char == "[" and in_quotes is None:
in_attribute_selector = True
if char == "]" and in_quotes is None:
in_attribute_selector = False
if char in "\"'":
if in_quotes is not None:
if in_quotes == char:
in_quotes = None
else:
in_quotes = char
if char == " " and not in_attribute_selector:
yield "".join(part)
part = []
else:
part.append(char)
yield "".join(part)
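# Illustrative sketch (values worked out from the parsing logic above, not part of
# the original module): a CSS-style selector string is split into SelectorPart
# objects in innermost-first order, and each part's `data` dict feeds the query
# helpers defined on SelectorPart.
#
#   parts = Selector('div.header > a.link:nth-child(2)').parts
#   parts[0].data               # {'nth_child': '2', 'attr_class__contains': ['link'], 'tag_name': 'a'}
#   parts[1].data               # {'attr_class__contains': ['header'], 'tag_name': 'div'}
#   parts[1].direct_descendant  # True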
class Event(models.Model):
class Meta:
indexes = [
models.Index(fields=["elements_hash"]),
models.Index(fields=["timestamp", "team_id", "event"]),
# Separately managed:
# models.Index(fields=["created_at"]),
# NOTE: The below index has been added as a manual migration in
# `posthog/migrations/0024_add_event_distinct_id_index.py, but I'm
# adding this here to improve visibility.
# models.Index(fields=["distinct_id"], name="idx_distinct_id"),
]
created_at: models.DateTimeField = models.DateTimeField(auto_now_add=True, null=True, blank=True)
team: models.ForeignKey = models.ForeignKey(Team, on_delete=models.CASCADE)
event: models.CharField = models.CharField(max_length=200, null=True, blank=True)
distinct_id: models.CharField = models.CharField(max_length=200)
properties: models.JSONField = models.JSONField(default=dict)
timestamp: models.DateTimeField = models.DateTimeField(default=timezone.now, blank=True)
elements_hash: models.CharField = models.CharField(max_length=200, null=True, blank=True)
site_url: models.CharField = models.CharField(max_length=200, null=True, blank=True)
# DEPRECATED: elements are stored against element groups now
elements: models.JSONField = models.JSONField(default=list, null=True, blank=True)
| [
"[email protected]"
]
| |
08241ca33d0d08c4b7977714c1b9eef5676f3ab5 | dd694c300d0380df35c62f7ab7667346214ea296 | /rwlaunchpad/test/mano_ut.py | ee095d1f55d1a6facbe64532bbb9685047d453d1 | []
| no_license | RIFTIO/SO | 9412858132db0430217a2c5c55fb4b1db89290fa | 697160573011d47f45bd0b955a291a46063d3b15 | refs/heads/RIFT.ware-4.3.3 | 2021-06-22T13:42:40.860291 | 2016-12-29T21:47:25 | 2016-12-29T21:47:25 | 75,762,640 | 1 | 1 | null | 2017-02-08T19:31:28 | 2016-12-06T19:11:20 | Python | UTF-8 | Python | false | false | 45,990 | py | #!/usr/bin/env python3
#
# Copyright 2016 RIFT.IO Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import os
import sys
import unittest
import uuid
import xmlrunner
import argparse
import logging
import time
import types
import gi
gi.require_version('RwCloudYang', '1.0')
gi.require_version('RwDts', '1.0')
gi.require_version('RwNsmYang', '1.0')
gi.require_version('RwLaunchpadYang', '1.0')
gi.require_version('RwResourceMgrYang', '1.0')
gi.require_version('RwcalYang', '1.0')
gi.require_version('RwNsrYang', '1.0')
gi.require_version('NsrYang', '1.0')
gi.require_version('RwlogMgmtYang', '1.0')
from gi.repository import (
RwCloudYang as rwcloudyang,
RwDts as rwdts,
RwLaunchpadYang as launchpadyang,
RwNsmYang as rwnsmyang,
RwNsrYang as rwnsryang,
NsrYang as nsryang,
RwResourceMgrYang as rmgryang,
RwcalYang as rwcalyang,
RwConfigAgentYang as rwcfg_agent,
RwlogMgmtYang
)
from gi.repository.RwTypes import RwStatus
import rift.mano.examples.ping_pong_nsd as ping_pong_nsd
import rift.tasklets
import rift.test.dts
import rw_peas
openstack_info = {
'username': 'pluto',
'password': 'mypasswd',
'auth_url': 'http://10.66.4.27:5000/v3/',
'project_name': 'demo',
'mgmt_network': 'private',
}
if sys.version_info < (3, 4, 4):
asyncio.ensure_future = asyncio.async
class XPaths(object):
@staticmethod
def nsd(k=None):
return ("C,/nsd:nsd-catalog/nsd:nsd" +
("[nsd:id='{}']".format(k) if k is not None else ""))
@staticmethod
def vld(k=None):
return ("C,/vld:vld-catalog/vld:vld" +
("[vld:id='{}']".format(k) if k is not None else ""))
@staticmethod
def vnfd(k=None):
return ("C,/vnfd:vnfd-catalog/vnfd:vnfd" +
("[vnfd:id='{}']".format(k) if k is not None else ""))
@staticmethod
def vnfr(k=None):
return ("D,/vnfr:vnfr-catalog/vnfr:vnfr" +
("[vnfr:id='{}']".format(k) if k is not None else ""))
@staticmethod
def vlr(k=None):
return ("D,/vlr:vlr-catalog/vlr:vlr" +
("[vlr:id='{}']".format(k) if k is not None else ""))
@staticmethod
def nsd_ref_count(k=None):
return ("D,/nsr:ns-instance-opdata/rw-nsr:nsd-ref-count" +
("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else ""))
@staticmethod
def vnfd_ref_count(k=None):
return ("D,/vnfr:vnfr-catalog/rw-vnfr:vnfd-ref-count" +
("[rw-nsr:nsd-id-ref='{}']".format(k) if k is not None else ""))
@staticmethod
def nsr_config(k=None):
return ("C,/nsr:ns-instance-config/nsr:nsr" +
("[nsr:id='{}']".format(k) if k is not None else ""))
@staticmethod
def nsr_opdata(k=None):
return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
("[nsr:ns-instance-config-ref='{}']".format(k) if k is not None else ""))
@staticmethod
def nsr_config_status(k=None):
return ("D,/nsr:ns-instance-opdata/nsr:nsr" +
("[nsr:ns-instance-config-ref='{}']/config_status".format(k) if k is not None else ""))
@staticmethod
def cm_state(k=None):
if k is None:
return ("D,/rw-conman:cm-state/rw-conman:cm-nsr")
else:
return ("D,/rw-conman:cm-state/rw-conman:cm-nsr" +
("[rw-conman:id='{}']".format(k) if k is not None else ""))
@staticmethod
def nsr_scale_group_instance(nsr_id=None, group_name=None, index=None):
return (("D,/nsr:ns-instance-opdata/nsr:nsr") +
("[nsr:ns-instance-config-ref='{}']".format(nsr_id) if nsr_id is not None else "") +
("/nsr:scaling-group-record") +
("[nsr:scaling-group-name-ref='{}']".format(group_name) if group_name is not None else "") +
("/nsr:instance") +
("[nsr:scaling-group-index-ref='{}']".format(index) if index is not None else ""))
@staticmethod
def nsr_scale_group_instance_config(nsr_id=None, group_name=None, index=None):
return (("C,/nsr:ns-instance-config/nsr:nsr") +
("[nsr:id='{}']".format(nsr_id) if nsr_id is not None else "") +
("/nsr:scaling-group") +
("[nsr:scaling-group-name-ref='{}']".format(group_name) if group_name is not None else "") +
("/nsr:instance") +
("[nsr:index='{}']".format(index) if index is not None else ""))
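# Illustrative examples of the keyspec strings generated above (ids are arbitrary):
#   XPaths.nsr_config('abc')  ->  "C,/nsr:ns-instance-config/nsr:nsr[nsr:id='abc']"
#   XPaths.vnfr()             ->  "D,/vnfr:vnfr-catalog/vnfr:vnfr"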
class ManoQuerier(object):
def __init__(self, log, dts):
self.log = log
self.dts = dts
@asyncio.coroutine
def _read_query(self, xpath, do_trace=False):
self.log.debug("Running XPATH read query: %s (trace: %s)", xpath, do_trace)
flags = rwdts.XactFlag.MERGE
flags += rwdts.XactFlag.TRACE if do_trace else 0
res_iter = yield from self.dts.query_read(
xpath, flags=flags
)
results = []
for i in res_iter:
result = yield from i
if result is not None:
results.append(result.result)
return results
@asyncio.coroutine
def get_cm_state(self, nsr_id=None):
return (yield from self._read_query(XPaths.cm_state(nsr_id), False))
@asyncio.coroutine
def get_nsr_opdatas(self, nsr_id=None):
return (yield from self._read_query(XPaths.nsr_opdata(nsr_id), False))
@asyncio.coroutine
def get_nsr_scale_group_instance_opdata(self, nsr_id=None, group_name=None, index=None):
return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name, index), False))
#return (yield from self._read_query(XPaths.nsr_scale_group_instance(nsr_id, group_name), True))
@asyncio.coroutine
def get_nsr_configs(self, nsr_id=None):
return (yield from self._read_query(XPaths.nsr_config(nsr_id)))
@asyncio.coroutine
def get_nsr_config_status(self, nsr_id=None):
return (yield from self._read_query(XPaths.nsr_config_status(nsr_id)))
@asyncio.coroutine
def get_vnfrs(self, vnfr_id=None):
return (yield from self._read_query(XPaths.vnfr(vnfr_id)))
@asyncio.coroutine
def get_vlrs(self, vlr_id=None):
return (yield from self._read_query(XPaths.vlr(vlr_id)))
@asyncio.coroutine
def get_nsd_ref_counts(self, nsd_id=None):
return (yield from self._read_query(XPaths.nsd_ref_count(nsd_id)))
@asyncio.coroutine
def get_vnfd_ref_counts(self, vnfd_id=None):
return (yield from self._read_query(XPaths.vnfd_ref_count(vnfd_id)))
@asyncio.coroutine
def delete_nsr(self, nsr_id):
with self.dts.transaction() as xact:
yield from self.dts.query_delete(
XPaths.nsr_config(nsr_id),
0
#rwdts.XactFlag.TRACE,
#rwdts.Flag.ADVISE,
)
@asyncio.coroutine
def delete_nsd(self, nsd_id):
nsd_xpath = XPaths.nsd(nsd_id)
self.log.debug("Attempting to delete NSD with path = %s", nsd_xpath)
with self.dts.transaction() as xact:
yield from self.dts.query_delete(
nsd_xpath,
rwdts.XactFlag.ADVISE,
)
@asyncio.coroutine
def delete_vnfd(self, vnfd_id):
vnfd_xpath = XPaths.vnfd(vnfd_id)
self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
with self.dts.transaction() as xact:
yield from self.dts.query_delete(
vnfd_xpath,
rwdts.XactFlag.ADVISE,
)
@asyncio.coroutine
def update_nsd(self, nsd_id, nsd_msg):
nsd_xpath = XPaths.nsd(nsd_id)
self.log.debug("Attempting to update NSD with path = %s", nsd_xpath)
with self.dts.transaction() as xact:
yield from self.dts.query_update(
nsd_xpath,
rwdts.XactFlag.ADVISE,
nsd_msg,
)
@asyncio.coroutine
def update_vnfd(self, vnfd_id, vnfd_msg):
vnfd_xpath = XPaths.vnfd(vnfd_id)
self.log.debug("Attempting to delete VNFD with path = %s", vnfd_xpath)
with self.dts.transaction() as xact:
yield from self.dts.query_update(
vnfd_xpath,
rwdts.XactFlag.ADVISE,
vnfd_msg,
)
@asyncio.coroutine
def update_nsr_config(self, nsr_id, nsr_msg):
nsr_xpath = XPaths.nsr_config(nsr_id)
self.log.debug("Attempting to update NSR with path = %s", nsr_xpath)
with self.dts.transaction() as xact:
yield from self.dts.query_update(
nsr_xpath,
rwdts.XactFlag.ADVISE|rwdts.XactFlag.REPLACE,
nsr_msg,
)
class ManoTestCase(rift.test.dts.AbstractDTSTest):
@asyncio.coroutine
def verify_nsr_state(self, nsr_id, state):
nsrs = yield from self.querier.get_nsr_opdatas(nsr_id)
self.assertEqual(1, len(nsrs))
nsr = nsrs[0]
self.log.debug("Got nsr = %s", nsr)
self.assertEqual(state, nsr.operational_status)
@asyncio.coroutine
def verify_vlr_state(self, vlr_id, state):
vlrs = yield from self.querier.get_vlrs(vlr_id)
self.assertEqual(1, len(vlrs))
vlr = vlrs[0]
self.assertEqual(state, vlr.operational_status)
def verify_vdu_state(self, vdu, state):
self.assertEqual(state, vdu.operational_status)
@asyncio.coroutine
def verify_vnf_state(self, vnfr_id, state):
vnfrs = yield from self.querier.get_vnfrs(vnfr_id)
self.assertEqual(1, len(vnfrs))
vnfr = vnfrs[0]
self.assertEqual(state, vnfr.operational_status)
@asyncio.coroutine
def terminate_nsr(self, nsr_id):
self.log.debug("Terminating nsr id: %s", nsr_id)
yield from self.querier.delete_nsr(nsr_id)
@asyncio.coroutine
def verify_nsr_deleted(self, nsr_id):
nsr_opdatas = yield from self.querier.get_nsr_opdatas(nsr_id)
self.assertEqual(0, len(nsr_opdatas))
nsr_configs = yield from self.querier.get_nsr_configs(nsr_id)
self.assertEqual(0, len(nsr_configs))
@asyncio.coroutine
def verify_num_vlrs(self, num_vlrs):
vlrs = yield from self.querier.get_vlrs()
self.assertEqual(num_vlrs, len(vlrs))
@asyncio.coroutine
def get_nsr_vlrs(self, nsr_id):
nsrs = yield from self.querier.get_nsr_opdatas(nsr_id)
return [v.vlr_ref for v in nsrs[0].vlr]
@asyncio.coroutine
def get_nsr_vnfs(self, nsr_id):
nsrs = yield from self.querier.get_nsr_opdatas(nsr_id)
return nsrs[0].constituent_vnfr_ref
@asyncio.coroutine
def get_vnf_vlrs(self, vnfr_id):
vnfrs = yield from self.querier.get_vnfrs(vnfr_id)
return [i.vlr_ref for i in vnfrs[0].internal_vlr]
@asyncio.coroutine
def verify_num_nsr_vlrs(self, nsr_id, num_vlrs):
vlrs = yield from self.get_nsr_vlrs(nsr_id)
self.assertEqual(num_vlrs, len(vlrs))
@asyncio.coroutine
def verify_num_nsr_vnfrs(self, nsr_id, num_vnfs):
vnfs = yield from self.get_nsr_vnfs(nsr_id)
self.assertEqual(num_vnfs, len(vnfs))
@asyncio.coroutine
def verify_num_vnfr_vlrs(self, vnfr_id, num_vlrs):
vlrs = yield from self.get_vnf_vlrs(vnfr_id)
self.assertEqual(num_vlrs, len(vlrs))
@asyncio.coroutine
def get_vnf_vdus(self, vnfr_id):
vnfrs = yield from self.querier.get_vnfrs(vnfr_id)
return [i for i in vnfrs[0].vdur]
@asyncio.coroutine
def verify_num_vnfr_vdus(self, vnfr_id, num_vdus):
vdus = yield from self.get_vnf_vdus(vnfr_id)
self.assertEqual(num_vdus, len(vdus))
@asyncio.coroutine
def verify_num_vnfrs(self, num_vnfrs):
vnfrs = yield from self.querier.get_vnfrs()
self.assertEqual(num_vnfrs, len(vnfrs))
@asyncio.coroutine
def verify_nsd_ref_count(self, nsd_id, num_ref):
nsd_ref_counts = yield from self.querier.get_nsd_ref_counts(nsd_id)
self.assertEqual(num_ref, nsd_ref_counts[0].instance_ref_count)
class DescriptorPublisher(object):
def __init__(self, log, loop, dts):
self.log = log
self.loop = loop
self.dts = dts
self._registrations = []
@asyncio.coroutine
def publish(self, w_path, path, desc):
ready_event = asyncio.Event(loop=self.loop)
@asyncio.coroutine
def on_ready(regh, status):
self.log.debug("Create element: %s, obj-type:%s obj:%s",
path, type(desc), desc)
with self.dts.transaction() as xact:
regh.create_element(path, desc, xact.xact)
self.log.debug("Created element: %s, obj:%s", path, desc)
ready_event.set()
handler = rift.tasklets.DTS.RegistrationHandler(
on_ready=on_ready
)
self.log.debug("Registering path: %s, obj:%s", w_path, desc)
reg = yield from self.dts.register(
w_path,
handler,
flags=rwdts.Flag.PUBLISHER | rwdts.Flag.NO_PREP_READ
)
self._registrations.append(reg)
self.log.debug("Registered path : %s", w_path)
yield from ready_event.wait()
return reg
def unpublish_all(self):
self.log.debug("Deregistering all published descriptors")
for reg in self._registrations:
reg.deregister()
class PingPongNsrConfigPublisher(object):
XPATH = "C,/nsr:ns-instance-config"
def __init__(self, log, loop, dts, ping_pong, cloud_account_name):
self.dts = dts
self.log = log
self.loop = loop
self.ref = None
self.querier = ManoQuerier(log, dts)
self.nsr_config = rwnsryang.YangData_Nsr_NsInstanceConfig()
nsr = rwnsryang.YangData_Nsr_NsInstanceConfig_Nsr()
nsr.id = str(uuid.uuid4())
nsr.name = "ns1.{}".format(nsr.id)
nsr.nsd = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_Nsd()
nsr.nsd.from_dict(ping_pong.ping_pong_nsd.nsd.as_dict())
nsr.cloud_account = cloud_account_name
nsr.vnf_cloud_account_map.add().from_dict({
'member_vnf_index_ref': nsr.nsd.constituent_vnfd[0].member_vnf_index,
'config_agent_account': 'RiftCA',
#'cloud_account':'mock_account1'
})
inputs = nsryang.YangData_Nsr_NsInstanceConfig_Nsr_InputParameter()
inputs.xpath = "/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(ping_pong.nsd_id)
inputs.value = "inigo montoya"
fast_cpu = {'metadata_key': 'FASTCPU', 'metadata_value': 'True'}
self.create_nsd_placement_group_map(nsr,
group_name = 'Orcus',
cloud_type = 'openstack',
construct_type = 'host_aggregate',
construct_value = [fast_cpu])
fast_storage = {'metadata_key': 'FASTSSD', 'metadata_value': 'True'}
self.create_nsd_placement_group_map(nsr,
group_name = 'Quaoar',
cloud_type = 'openstack',
construct_type = 'host_aggregate',
construct_value = [fast_storage])
fast_cpu = {'metadata_key': 'BLUE_HW', 'metadata_value': 'True'}
self.create_vnfd_placement_group_map(nsr,
group_name = 'Eris',
vnfd_id = ping_pong.ping_vnfd_id,
cloud_type = 'openstack',
construct_type = 'host_aggregate',
construct_value = [fast_cpu])
fast_storage = {'metadata_key': 'YELLOW_HW', 'metadata_value': 'True'}
self.create_vnfd_placement_group_map(nsr,
group_name = 'Weywot',
vnfd_id = ping_pong.pong_vnfd_id,
cloud_type = 'openstack',
construct_type = 'host_aggregate',
construct_value = [fast_storage])
nsr.input_parameter.append(inputs)
self._nsr = nsr
self.nsr_config.nsr.append(nsr)
self._ready_event = asyncio.Event(loop=self.loop)
asyncio.ensure_future(self.register(), loop=loop)
@asyncio.coroutine
def register(self):
@asyncio.coroutine
def on_ready(regh, status):
self._ready_event.set()
self.log.debug("Registering path: %s", PingPongNsrConfigPublisher.XPATH)
self.reg = yield from self.dts.register(
PingPongNsrConfigPublisher.XPATH,
flags=rwdts.Flag.PUBLISHER,
handler=rift.tasklets.DTS.RegistrationHandler(
on_ready=on_ready,
),
)
@asyncio.coroutine
def publish(self):
self.log.debug("Publishing NSR: {}".format(self.nsr_config))
yield from self._ready_event.wait()
with self.dts.transaction() as xact:
self.reg.create_element(
PingPongNsrConfigPublisher.XPATH,
self.nsr_config,
xact=xact.xact,
)
return self._nsr.id
@asyncio.coroutine
def create_scale_group_instance(self, group_name, index):
index = 1
scaling_group = self.nsr_config.nsr[0].scaling_group.add()
scaling_group.from_dict({
"scaling_group_name_ref": group_name,
"instance": [{"index": index}],
})
with self.dts.transaction() as xact:
self.reg.update_element(
PingPongNsrConfigPublisher.XPATH,
self.nsr_config,
xact=xact.xact,
)
return index
def create_nsd_placement_group_map(self,
nsr,
group_name,
cloud_type,
construct_type,
construct_value):
placement_group = nsr.nsd_placement_group_maps.add()
placement_group.from_dict({
"placement_group_ref" : group_name,
"cloud_type" : cloud_type,
construct_type : construct_value,
})
def create_vnfd_placement_group_map(self,
nsr,
group_name,
vnfd_id,
cloud_type,
construct_type,
construct_value):
placement_group = nsr.vnfd_placement_group_maps.add()
placement_group.from_dict({
"placement_group_ref" : group_name,
"vnfd_id_ref" : vnfd_id,
"cloud_type" : cloud_type,
construct_type : construct_value,
})
@asyncio.coroutine
def delete_scale_group_instance(self, group_name, index):
self.log.debug("Deleting scale group %s instance %s", group_name, index)
#del self.nsr_config.nsr[0].scaling_group[0].instance[0]
xpath = XPaths.nsr_scale_group_instance_config(self.nsr_config.nsr[0].id, group_name, index)
yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
#with self.dts.transaction() as xact:
# self.reg.update_element(
# PingPongNsrConfigPublisher.XPATH,
# self.nsr_config,
# flags=rwdts.XactFlag.REPLACE,
# xact=xact.xact,
# )
def deregister(self):
if self.reg is not None:
self.reg.deregister()
def create_nsr_vl(self):
vld = self.nsr_config.nsr[0].nsd.vld.add()
vld.id = 'ping_pong_vld_2'
vld.name = 'ping_pong_vld_2' # hard coded
vld.short_name = vld.name
vld.vendor = 'RIFT.io'
vld.description = 'Toy VL'
vld.version = '1.0'
vld.type_yang = 'ELAN'
# cpref = vld.vnfd_connection_point_ref.add()
# cpref.member_vnf_index_ref = cp[0]
# cpref.vnfd_id_ref = cp[1]
# cpref.vnfd_connection_point_ref = cp[2]
vld = self.nsr_config.nsr[0].vl_cloud_account_map.add()
vld.vld_id_ref = 'ping_pong_vld_2'
vld.cloud_accounts = ["mock_account"]
@asyncio.coroutine
def add_nsr_vl(self):
self.create_nsr_vl()
yield from self.querier.update_nsr_config(
self.nsr_config.nsr[0].id,
self.nsr_config.nsr[0],
)
@asyncio.coroutine
def del_nsr_vl(self):
for vld in self.nsr_config.nsr[0].nsd.vld:
if vld.id == 'ping_pong_vld_2':
self.nsr_config.nsr[0].nsd.vld.remove(vld)
break
yield from self.querier.update_nsr_config(
self.nsr_config.nsr[0].id,
self.nsr_config.nsr[0],
)
def update_vnf_cloud_map(self,vnf_cloud_map):
self.log.debug("Modifying NSR to add VNF cloud account map: {}".format(vnf_cloud_map))
for vnf_index,cloud_acct in vnf_cloud_map.items():
vnf_maps = [vnf_map for vnf_map in self.nsr_config.nsr[0].vnf_cloud_account_map if vnf_index == vnf_map.member_vnf_index_ref]
if vnf_maps:
vnf_maps[0].cloud_account = cloud_acct
else:
self.nsr_config.nsr[0].vnf_cloud_account_map.add().from_dict({
'member_vnf_index_ref':vnf_index,
'cloud_account':cloud_acct
})
class PingPongDescriptorPublisher(object):
def __init__(self, log, loop, dts, num_external_vlrs=1, num_internal_vlrs=1, num_ping_vms=1):
self.log = log
self.loop = loop
self.dts = dts
self.querier = ManoQuerier(self.log, self.dts)
self.publisher = DescriptorPublisher(self.log, self.loop, self.dts)
self.ping_vnfd, self.pong_vnfd, self.ping_pong_nsd = \
ping_pong_nsd.generate_ping_pong_descriptors(
pingcount=1,
external_vlr_count=num_external_vlrs,
internal_vlr_count=num_internal_vlrs,
num_vnf_vms=2,
mano_ut=True,
use_scale_group=True,
use_mon_params=False,
)
self.config_dir = os.path.join(os.getenv('RIFT_ARTIFACTS'),
"launchpad/libs",
self.ping_pong_nsd.id,
"config")
@property
def nsd_id(self):
return self.ping_pong_nsd.id
@property
def ping_vnfd_id(self):
return self.ping_vnfd.id
@property
def pong_vnfd_id(self):
return self.pong_vnfd.id
@asyncio.coroutine
def publish_desciptors(self):
# Publish ping_vnfd
xpath = XPaths.vnfd(self.ping_vnfd_id)
xpath_wild = XPaths.vnfd()
for obj in self.ping_vnfd.descriptor.vnfd:
self.log.debug("Publishing ping_vnfd path: %s - %s, type:%s, obj:%s",
xpath, xpath_wild, type(obj), obj)
yield from self.publisher.publish(xpath_wild, xpath, obj)
# Publish pong_vnfd
xpath = XPaths.vnfd(self.pong_vnfd_id)
xpath_wild = XPaths.vnfd()
for obj in self.pong_vnfd.descriptor.vnfd:
self.log.debug("Publishing pong_vnfd path: %s, wild_path: %s, obj:%s",
xpath, xpath_wild, obj)
yield from self.publisher.publish(xpath_wild, xpath, obj)
# Publish ping_pong_nsd
xpath = XPaths.nsd(self.nsd_id)
xpath_wild = XPaths.nsd()
for obj in self.ping_pong_nsd.descriptor.nsd:
self.log.debug("Publishing ping_pong nsd path: %s, wild_path: %s, obj:%s",
xpath, xpath_wild, obj)
yield from self.publisher.publish(xpath_wild, xpath, obj)
self.log.debug("DONE - publish_desciptors")
def unpublish_descriptors(self):
self.publisher.unpublish_all()
@asyncio.coroutine
def delete_nsd(self):
yield from self.querier.delete_nsd(self.ping_pong_nsd.id)
@asyncio.coroutine
def delete_ping_vnfd(self):
yield from self.querier.delete_vnfd(self.ping_vnfd.id)
@asyncio.coroutine
def update_nsd(self):
yield from self.querier.update_nsd(
self.ping_pong_nsd.id,
self.ping_pong_nsd.descriptor.nsd[0]
)
@asyncio.coroutine
def update_ping_vnfd(self):
yield from self.querier.update_vnfd(
self.ping_vnfd.id,
self.ping_vnfd.descriptor.vnfd[0]
)
class ManoTestCase(rift.test.dts.AbstractDTSTest):
"""
DTS GI interface unittests
Note: Each tests uses a list of asyncio.Events for staging through the
test. These are required here because we are bring up each coroutine
("tasklet") at the same time and are not implementing any re-try
mechanisms. For instance, this is used in numerous tests to make sure that
a publisher is up and ready before the subscriber sends queries. Such
event lists should not be used in production software.
"""
@classmethod
def configure_suite(cls, rwmain):
vns_dir = os.environ.get('VNS_DIR')
vnfm_dir = os.environ.get('VNFM_DIR')
nsm_dir = os.environ.get('NSM_DIR')
rm_dir = os.environ.get('RM_DIR')
rwmain.add_tasklet(vns_dir, 'rwvnstasklet')
rwmain.add_tasklet(vnfm_dir, 'rwvnfmtasklet')
rwmain.add_tasklet(nsm_dir, 'rwnsmtasklet')
rwmain.add_tasklet(rm_dir, 'rwresmgrtasklet')
rwmain.add_tasklet(rm_dir, 'rwconmantasklet')
@classmethod
def configure_schema(cls):
return rwnsmyang.get_schema()
@classmethod
def configure_timeout(cls):
return 240
@staticmethod
def get_cal_account(account_type, account_name):
"""
Creates an object of class rwcloudyang.CloudAccount for the given account type.
"""
account = rwcloudyang.CloudAccount()
if account_type == 'mock':
account.name = account_name
account.account_type = "mock"
account.mock.username = "mock_user"
elif ((account_type == 'openstack_static') or (account_type == 'openstack_dynamic')):
account.name = account_name
account.account_type = 'openstack'
account.openstack.key = openstack_info['username']
account.openstack.secret = openstack_info['password']
account.openstack.auth_url = openstack_info['auth_url']
account.openstack.tenant = openstack_info['project_name']
account.openstack.mgmt_network = openstack_info['mgmt_network']
return account
@asyncio.coroutine
def configure_cloud_account(self, dts, cloud_type, cloud_name="cloud1"):
account = self.get_cal_account(cloud_type, cloud_name)
account_xpath = "C,/rw-cloud:cloud/rw-cloud:account[rw-cloud:name='{}']".format(cloud_name)
self.log.info("Configuring cloud-account: %s", account)
yield from dts.query_create(account_xpath,
rwdts.XactFlag.ADVISE,
account)
@asyncio.coroutine
def wait_tasklets(self):
yield from asyncio.sleep(5, loop=self.loop)
def configure_test(self, loop, test_id):
self.log.debug("STARTING - %s", self.id())
self.tinfo = self.new_tinfo(self.id())
self.dts = rift.tasklets.DTS(self.tinfo, self.schema, self.loop)
self.ping_pong = PingPongDescriptorPublisher(self.log, self.loop, self.dts)
self.querier = ManoQuerier(self.log, self.dts)
self.nsr_publisher = PingPongNsrConfigPublisher(
self.log,
loop,
self.dts,
self.ping_pong,
"mock_account",
)
def test_create_nsr_record(self):
@asyncio.coroutine
def verify_cm_state(termination=False, nsrid=None):
self.log.debug("Verifying cm_state path = %s", XPaths.cm_state(nsrid))
#print("###>>> Verifying cm_state path:", XPaths.cm_state(nsrid))
loop_count = 10
loop_sleep = 10
while loop_count:
yield from asyncio.sleep(loop_sleep, loop=self.loop)
loop_count -= 1
cm_nsr = None
cm_nsr_i = yield from self.querier.get_cm_state(nsr_id=nsrid)
if (cm_nsr_i is not None and len(cm_nsr_i) != 0):
self.assertEqual(1, len(cm_nsr_i))
cm_nsr = cm_nsr_i[0].as_dict()
#print("###>>> cm_nsr=", cm_nsr)
if termination:
if len(cm_nsr_i) == 0:
print("\n###>>> cm-state NSR deleted OK <<<###\n")
return
elif (cm_nsr is not None and
'state' in cm_nsr and
(cm_nsr['state'] == 'ready')):
self.log.debug("Got cm_nsr record %s", cm_nsr)
print("\n###>>> cm-state NSR 'ready' OK <<<###\n")
return
# if (len(cm_nsr_i) == 1 and cm_nsr_i[0].state == 'ready'):
# self.log.debug("Got cm_nsr record %s", cm_nsr)
# else:
# yield from asyncio.sleep(10, loop=self.loop)
print("###>>> Failed cm-state, termination:", termination)
self.assertEqual(1, loop_count)
@asyncio.coroutine
def verify_nsr_opdata(termination=False):
self.log.debug("Verifying nsr opdata path = %s", XPaths.nsr_opdata())
while True:
nsrs = yield from self.querier.get_nsr_opdatas()
if termination:
if len(nsrs) != 0:
for i in range(10):
nsrs = yield from self.querier.get_nsr_opdatas()
if len(nsrs) == 0:
self.log.debug("No active NSR records found. NSR termination successful")
return
else:
self.assertEqual(0, len(nsrs))
self.log.error("Active NSR records found. NSR termination failed")
else:
self.log.debug("No active NSR records found. NSR termination successful")
self.assertEqual(0, len(nsrs))
return
nsr = nsrs[0]
self.log.debug("Got nsr record %s", nsr)
if nsr.operational_status == 'running':
self.log.debug("!!! Rcvd NSR with running status !!!")
self.assertEqual("configuring", nsr.config_status)
break
self.log.debug("Rcvd NSR with %s status", nsr.operational_status)
self.log.debug("Sleeping for 10 seconds")
yield from asyncio.sleep(10, loop=self.loop)
@asyncio.coroutine
def verify_nsr_config(termination=False):
self.log.debug("Verifying nsr config path = %s", XPaths.nsr_config())
nsr_configs = yield from self.querier.get_nsr_configs()
self.assertEqual(1, len(nsr_configs))
nsr_config = nsr_configs[0]
self.assertEqual(
"/nsd:nsd-catalog/nsd:nsd[nsd:id={}]/nsd:name".format(self.ping_pong.nsd_id),
nsr_config.input_parameter[0].xpath,
)
@asyncio.coroutine
def verify_nsr_config_status(termination=False, nsrid=None):
if termination is False and nsrid is not None:
self.log.debug("Verifying nsr config status path = %s", XPaths.nsr_opdata(nsrid))
loop_count = 6
loop_sleep = 10
while loop_count:
loop_count -= 1
yield from asyncio.sleep(loop_sleep, loop=self.loop)
nsr_opdata_l = yield from self.querier.get_nsr_opdatas(nsrid)
self.assertEqual(1, len(nsr_opdata_l))
nsr_opdata = nsr_opdata_l[0].as_dict()
self.log.debug("NSR opdata: {}".format(nsr_opdata))
if ("configured" == nsr_opdata['config_status']):
print("\n###>>> NSR Config Status 'configured' OK <<<###\n")
return
self.assertEqual("configured", nsr_opdata['config_status'])
@asyncio.coroutine
def verify_vnfr_record(termination=False):
self.log.debug("Verifying vnfr record path = %s, Termination=%d",
XPaths.vnfr(), termination)
if termination:
for i in range(10):
vnfrs = yield from self.querier.get_vnfrs()
if len(vnfrs) == 0:
return True
for vnfr in vnfrs:
self.log.debug("VNFR still exists = %s", vnfr)
yield from asyncio.sleep(.5, loop=self.loop)
assert len(vnfrs) == 0
while True:
vnfrs = yield from self.querier.get_vnfrs()
if len(vnfrs) != 0 and termination is False:
vnfr = vnfrs[0]
self.log.debug("Rcvd VNFR with %s status", vnfr.operational_status)
if vnfr.operational_status == 'running':
self.log.debug("!!! Rcvd VNFR with running status !!!")
return True
elif vnfr.operational_status == "failed":
self.log.debug("!!! Rcvd VNFR with failed status !!!")
return False
self.log.debug("Sleeping for 10 seconds")
yield from asyncio.sleep(10, loop=self.loop)
@asyncio.coroutine
def verify_vnfr_cloud_account(vnf_index, cloud_account):
self.log.debug("Verifying vnfr record Cloud account for vnf index = %d is %s", vnf_index,cloud_account)
vnfrs = yield from self.querier.get_vnfrs()
cloud_accounts = [vnfr.cloud_account for vnfr in vnfrs if vnfr.member_vnf_index_ref == vnf_index]
self.log.debug("VNFR cloud account for index %d is %s", vnf_index,cloud_accounts[0])
assert cloud_accounts[0] == cloud_account
@asyncio.coroutine
def verify_vlr_record(termination=False):
vlr_xpath = XPaths.vlr()
self.log.debug("Verifying vlr record path = %s, termination: %s",
vlr_xpath, termination)
res_iter = yield from self.dts.query_read(vlr_xpath)
for i in res_iter:
result = yield from i
if termination:
self.assertIsNone(result)
self.log.debug("Got vlr record %s", result)
@asyncio.coroutine
def verify_vlrs(nsr_id, count=0):
while True:
nsrs = yield from self.querier.get_nsr_opdatas()
nsr = nsrs[0]
self.log.debug("Got nsr record %s", nsr)
if nsr.operational_status == 'running':
self.log.debug("!!! Rcvd NSR with running status !!!")
# Check the VLR count
if (len(nsr.vlr)) == count:
self.log.debug("NSR %s has %d VLRs", nsr_id, count)
break
self.log.debug("Rcvd NSR %s with %s status", nsr_id, nsr.operational_status)
self.log.debug("Sleeping for 10 seconds")
yield from asyncio.sleep(10, loop=self.loop)
@asyncio.coroutine
def verify_nsd_ref_count(termination):
self.log.debug("Verifying nsd ref count= %s", XPaths.nsd_ref_count())
res_iter = yield from self.dts.query_read(XPaths.nsd_ref_count())
for i in res_iter:
result = yield from i
self.log.debug("Got nsd ref count record %s", result)
@asyncio.coroutine
def verify_vnfd_ref_count(termination):
self.log.debug("Verifying vnfd ref count= %s", XPaths.vnfd_ref_count())
res_iter = yield from self.dts.query_read(XPaths.vnfd_ref_count())
for i in res_iter:
result = yield from i
self.log.debug("Got vnfd ref count record %s", result)
@asyncio.coroutine
def verify_scale_group_reaches_state(nsr_id, scale_group, index, state, timeout=1000):
start_time = time.time()
instance_state = None
while (time.time() - start_time) < timeout:
results = yield from self.querier.get_nsr_opdatas(nsr_id=nsr_id)
if len(results) == 1:
result = results[0]
if len(result.scaling_group_record) == 0:
continue
if len(result.scaling_group_record[0].instance) == 0:
continue
instance = result.scaling_group_record[0].instance[0]
self.assertEqual(instance.scaling_group_index_ref, index)
instance_state = instance.op_status
if instance_state == state:
self.log.debug("Scale group instance reached %s state", state)
return
yield from asyncio.sleep(1, loop=self.loop)
self.assertEqual(state, instance_state)
@asyncio.coroutine
def verify_results(termination=False, nsrid=None):
yield from verify_vnfr_record(termination)
#yield from verify_vlr_record(termination)
yield from verify_nsr_opdata(termination)
yield from verify_nsr_config(termination)
yield from verify_nsd_ref_count(termination)
yield from verify_vnfd_ref_count(termination)
# Config Manager
yield from verify_cm_state(termination, nsrid)
yield from verify_nsr_config_status(termination, nsrid)
@asyncio.coroutine
def verify_scale_instance(index):
self.log.debug("Verifying scale record path = %s, Termination=%d",
XPaths.vnfr(), termination)
if termination:
for i in range(5):
vnfrs = yield from self.querier.get_vnfrs()
if len(vnfrs) == 0:
return True
for vnfr in vnfrs:
self.log.debug("VNFR still exists = %s", vnfr)
assert len(vnfrs) == 0
while True:
vnfrs = yield from self.querier.get_vnfrs()
if len(vnfrs) != 0 and termination is False:
vnfr = vnfrs[0]
self.log.debug("Rcvd VNFR with %s status", vnfr.operational_status)
if vnfr.operational_status == 'running':
self.log.debug("!!! Rcvd VNFR with running status !!!")
return True
elif vnfr.operational_status == "failed":
self.log.debug("!!! Rcvd VNFR with failed status !!!")
return False
self.log.debug("Sleeping for 10 seconds")
yield from asyncio.sleep(10, loop=self.loop)
@asyncio.coroutine
def terminate_ns(nsr_id):
xpath = XPaths.nsr_config(nsr_id)
self.log.debug("Terminating network service with path %s", xpath)
yield from self.dts.query_delete(xpath, flags=rwdts.XactFlag.ADVISE)
self.log.debug("Terminated network service with path %s", xpath)
@asyncio.coroutine
def run_test():
yield from self.wait_tasklets()
cloud_type = "mock"
yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account")
yield from self.configure_cloud_account(self.dts, cloud_type, "mock_account1")
yield from self.ping_pong.publish_desciptors()
# Attempt deleting VNFD not in use
yield from self.ping_pong.update_ping_vnfd()
# Attempt updating NSD not in use
yield from self.ping_pong.update_nsd()
# Attempt deleting VNFD not in use
yield from self.ping_pong.delete_ping_vnfd()
# Attempt deleting NSD not in use
yield from self.ping_pong.delete_nsd()
yield from self.ping_pong.publish_desciptors()
nsr_id = yield from self.nsr_publisher.publish()
yield from verify_results(nsrid=nsr_id)
# yield from self.nsr_publisher.create_scale_group_instance("ping_group", 1)
# yield from verify_scale_group_reaches_state(nsr_id, "ping_group", 1, "running")
# yield from self.nsr_publisher.delete_scale_group_instance("ping_group", 1)
yield from asyncio.sleep(10, loop=self.loop)
# Attempt deleting VNFD in use
yield from self.ping_pong.delete_ping_vnfd()
# Attempt updating NSD in use
yield from self.ping_pong.update_nsd()
# Update NSD in use with new VL
yield from self.nsr_publisher.add_nsr_vl()
# Verify the new VL has been added
yield from verify_vlrs(nsr_id, count=2)
# Delete the added VL
yield from self.nsr_publisher.del_nsr_vl()
# Verify the new VL has been added
yield from verify_vlrs(nsr_id, count=1)
# Attempt deleting NSD in use
yield from self.ping_pong.delete_nsd()
yield from terminate_ns(nsr_id)
yield from asyncio.sleep(25, loop=self.loop)
self.log.debug("Verifying termination results")
yield from verify_results(termination=True, nsrid=nsr_id)
self.log.debug("Verified termination results")
# Multi site NS case
self.log.debug("Testing multi site NS")
self.nsr_publisher.update_vnf_cloud_map({1:"mock_account1",2:"mock_account"})
nsr_id = yield from self.nsr_publisher.publish()
yield from verify_results(nsrid=nsr_id)
yield from verify_vnfr_cloud_account(1,"mock_account1")
yield from verify_vnfr_cloud_account(2,"mock_account")
yield from verify_vlrs(nsr_id, count=2)
yield from terminate_ns(nsr_id)
yield from asyncio.sleep(25, loop=self.loop)
self.log.debug("Verifying termination results for multi site NS")
yield from verify_results(termination=True, nsrid=nsr_id)
self.log.debug("Verified termination results for multi site NS")
self.log.debug("Attempting to delete VNFD for real")
yield from self.ping_pong.delete_ping_vnfd()
self.log.debug("Attempting to delete NSD for real")
yield from self.ping_pong.delete_nsd()
future = asyncio.ensure_future(run_test(), loop=self.loop)
self.run_until(future.done)
if future.exception() is not None:
self.log.error("Caught exception during test")
raise future.exception()
def main():
plugin_dir = os.path.join(os.environ["RIFT_INSTALL"], "usr/lib/rift/plugins")
if 'VNS_DIR' not in os.environ:
os.environ['VNS_DIR'] = os.path.join(plugin_dir, 'rwvns')
if 'VNFM_DIR' not in os.environ:
os.environ['VNFM_DIR'] = os.path.join(plugin_dir, 'rwvnfm')
if 'NSM_DIR' not in os.environ:
os.environ['NSM_DIR'] = os.path.join(plugin_dir, 'rwnsm')
if 'RM_DIR' not in os.environ:
os.environ['RM_DIR'] = os.path.join(plugin_dir, 'rwresmgrtasklet')
runner = xmlrunner.XMLTestRunner(output=os.environ["RIFT_MODULE_TEST"])
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-n', '--no-runner', action='store_true')
args, unittest_args = parser.parse_known_args()
if args.no_runner:
runner = None
ManoTestCase.log_level = logging.DEBUG if args.verbose else logging.WARN
unittest.main(testRunner=runner, argv=[sys.argv[0]] + unittest_args)
if __name__ == '__main__':
main()
# vim: sw=4
| [
"[email protected]"
]
| |
110496e18fa67c64c20bfd271e9accc1b77ca647 | 615e9d142587c965d4f593ce68cae1811824026d | /19-functions/javoblar-19-07.py | 3078014c425e95b4785cee83aa845fd53d1e7442 | []
| no_license | XurshidbekDavronov/python-darslar | 0100bb8ea61c355949e81d1d3f3b923befeb80c9 | 4fcf9a3e0c2facdedaed9b53ef806cdc0095fd9d | refs/heads/main | 2023-06-21T03:33:19.509225 | 2021-07-13T13:04:56 | 2021-07-13T13:04:56 | 377,176,205 | 1 | 0 | null | 2021-06-15T13:40:33 | 2021-06-15T13:40:32 | null | UTF-8 | Python | false | false | 510 | py | """
16/12/2020
Programming Fundamentals
Lesson 19: FUNCTIONS
Author: Anvar Narzullaev
Website: https://python.sariq.dev
"""
# Write a function that takes a number from the user and checks whether the number
# is divisible by 2, 3, 4 and 5 with no remainder.
# Print the results to the console (in the form "15 soni 3 ga qoldiqsiz bo'linadi",
# i.e. "15 is divisible by 3 with no remainder").
def bolinish_alomatlari(son):
for n in range(2,11):
if not son%n:
print(f"{son} {n} ga qoldiqsiz bo'linadi")
bolinish_alomatlari(20)
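# Expected console output of the call above (the message text stays in Uzbek,
# matching the print statement inside the function):
#   20 2 ga qoldiqsiz bo'linadi
#   20 4 ga qoldiqsiz bo'linadi
#   20 5 ga qoldiqsiz bo'linadi
#   20 10 ga qoldiqsiz bo'linadi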
| [
"[email protected]"
]
| |
804861121ec5dd38d2d654fa3b12e263b371c486 | fa346a2d5886420e22707a7be03599e634b230a9 | /temboo/Library/Amazon/IAM/__init__.py | 59787a0664534645fc9a01dd8d74b838ef9e46c0 | []
| no_license | elihuvillaraus/entity-resolution | cebf937499ed270c3436b1dd25ab4aef687adc11 | 71dd49118a6e11b236861289dcf36436d31f06bc | refs/heads/master | 2021-12-02T17:29:11.864065 | 2014-01-08T04:29:30 | 2014-01-08T04:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,056 | py | from ChangePassword import *
from UpdateGroup import *
from CreateAccessKey import *
from ListRolePolicies import *
from UpdateLoginProfile import *
from GetUserPolicy import *
from UpdateServerCertificate import *
from DeleteServerCertificate import *
from DeactivateMFADevice import *
from UpdateAccountPasswordPolicy import *
from ListAccessKeys import *
from DeleteAccessKey import *
from GetUser import *
from DeleteInstanceProfile import *
from RemoveUserFromGroup import *
from DeleteGroup import *
from GetAccountPasswordPolicy import *
from CreateUser import *
from ListInstanceProfilesForRole import *
from ListGroups import *
from ResyncMFADevice import *
from GetAccountSummary import *
from ListMFADevices import *
from CreateGroup import *
from DeleteGroupPolicy import *
from CreateLoginProfile import *
from GetLoginProfile import *
from DeleteRolePolicy import *
from GetRole import *
from GetGroupPolicy import *
from ListUsers import *
from EnableMFADevice import *
from ListVirtualMFADevices import *
from DeleteRole import *
from UpdateAccessKey import *
from ListUserPolicies import *
from UploadSigningCertificate import *
from RemoveRoleFromInstanceProfile import *
from AddUserToGroup import *
from ListServerCertificates import *
from GetServerCertificate import *
from ListInstanceProfiles import *
from CreateInstanceProfile import *
from ListSigningCertificates import *
from AddRoleToInstanceProfile import *
from CreateAccountAlias import *
from ListGroupPolicies import *
from ListRoles import *
from ListGroupsForUser import *
from UpdateSigningCertificate import *
from DeleteAccountAlias import *
from ListAccountAliases import *
from DeleteUser import *
from DeleteAccountPasswordPolicy import *
from DeleteLoginProfile import *
from UploadServerCertificate import *
from GetInstanceProfile import *
from UpdateUser import *
from DeleteUserPolicy import *
from DeleteSigningCertificate import *
from GetRolePolicy import *
from GetGroup import *
from DeleteVirtualMFADevice import *
from CreateVirtualMFADevice import *
| [
"[email protected]"
]
| |
22254545f9a1cc0c5bd2eb4c3f056ed34bc7a22d | bcddca991afe606180dbb5ce6c033d8fb611154c | /docs/idf_extensions/include_build_file.py | b11a2128667b50bd2c713b8038e7b3dbc90675fd | [
"Apache-2.0"
]
| permissive | EmbeddedSystemClass/esp-idf | 8ac5a312be41936b1e2dc5c68b7b68c9b4c1e488 | 92db6a3dabc1106b72865b8bd91d9bdd54fbdf6c | refs/heads/master | 2022-12-31T19:57:49.052365 | 2020-10-22T19:19:01 | 2020-10-22T19:19:01 | 259,859,439 | 0 | 0 | Apache-2.0 | 2020-04-29T07:47:48 | 2020-04-29T07:47:47 | null | UTF-8 | Python | false | false | 764 | py | import os.path
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.misc import Include as BaseInclude
from sphinx.util.docutils import SphinxDirective
class IncludeBuildFile(BaseInclude, SphinxDirective):
"""
Like the standard "Include" directive, but relative to the app
build directory
"""
def run(self):
abspath = os.path.join(self.env.config.build_dir, self.arguments[0])
self.arguments[0] = abspath
self.env.note_included(abspath)
return super(IncludeBuildFile, self).run()
def setup(app):
directives.register_directive('include-build-file', IncludeBuildFile)
return {'parallel_read_safe': True, 'parallel_write_safe': True, 'version': '0.1'}
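# Typical use from a reStructuredText page, assuming the Sphinx config sets
# `build_dir` (the .inc path below is only an example, not a real file name):
#
#   .. include-build-file:: inc/esp_wifi.inc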
| [
"[email protected]"
]
|