| Column | Type | Values / Range |
| --- | --- | --- |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |

470ce8862eb06700eb7ec3afcc26e0ae4b1527eb | fd7ed3266a6f802289508e5721b2bcd354763748 | /notification_manager.py | fc2cf1ea95668dd6ad20c23f5b0c8a9269fcf244 | [] | no_license | ec500-software-engineering/exercise-1-modularity-jbw0410 | 817628ca81cb784c6da372ed3637bace4f36180b | 9bbc6beb322c245fd4bca1d2d355861fd16d9d98 | refs/heads/master | 2020-04-22T02:57:27.675676 | 2019-02-14T19:28:11 | 2019-02-14T19:28:11 | 170,068,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | from common_types import MessageUrgency
class NotificationManager(object):
"""
:developer: Josh
This class uses urgency to dictate which messaging mediums get used
LOW_URGENCY = 0
MEDIUM_URGENCY = 1
HIGH_URGENCY = 2
"""
def __init__(self, main_contact):
self._main_contact = main_contact
def send_message(self, msg):
raise NotImplementedError
class FlexibleNotificationManager(NotificationManager):
def __init__(self, main_contact, sms_sender, telegram_sender, email_sender):
super().__init__(main_contact)
self._sms_sender = sms_sender
self._telegram_sender = telegram_sender
self._email_sender = email_sender
def send_message(self, msg):
if msg.get_urgency() == MessageUrgency.HIGH_URGENCY:
self._sms_sender.send_notification(msg, self._main_contact)
self._telegram_sender.send_notification(msg, self._main_contact)
elif msg.get_urgency() == MessageUrgency.MEDIUM_URGENCY:
self._telegram_sender.send_notification(msg, self._main_contact)
elif msg.get_urgency() == MessageUrgency.LOW_URGENCY:
self._email_sender.send_notification(msg, self._main_contact) | [
"[email protected]"
] | |
4a4353352eab6a8cc98894df3052e0b68d2b8232 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_121/ch35_2020_09_21_13_25_01_007144.py | 62b79e7010a777c8f5bf1e3e2107e925f0d9c7cc | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | num=int(input('Digite seu número para adicionar: '))
while num!=0:
numero=int(input('Digite seu número para adicionar ou 0 para somar: '))
if numero==0:
print(num)
break
else:
num+=numero | [
"[email protected]"
] | |
2dc0b1f1f82ad169a5d289da8a02dec624e5d2d1 | 92a619c043e0c26fb65e58619a0e1c5090a9efe0 | /Useful_Code_Snippets/pramp_k_messed_array_sort.py | 17542c02521c6a1bc4bb97355d7544669599c472 | [] | no_license | curieshicy/My_Utilities_Code | 39150171f8e0aa4971cfc3d7adb32db7f45e6733 | 8b14a5c1112794d3451486c317d5e3c73efcd3b5 | refs/heads/master | 2022-06-22T06:06:39.901008 | 2022-06-20T16:00:51 | 2022-06-20T16:00:51 | 177,379,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | '''
0 1 2 3 4 5 6 7 8 9
arr = [1, 4, 5, 2, 3, 7, 8, 6, 10, 9] k = 2
i i
'''
import heapq
def sort_k_messed_array(arr, k):
    # Each element is at most k positions away from its sorted position, so the
    # smallest remaining value always lies within the next k + 1 unsorted
    # elements. Keep a min-heap of that sliding window and repeatedly pop its
    # minimum: O(n log k) time, O(k) extra space.
    heap = [i for i in arr[:k + 1]]
    heapq.heapify(heap)
    res = []
    i = k + 1
    while heap and i < len(arr):
        # Pop the smallest of the current k + 1 candidates, then slide the
        # window forward by pushing the next unseen element.
        min_val = heapq.heappop(heap)
        res.append(min_val)
        heapq.heappush(heap, arr[i])
        i += 1
    while heap:
        # Input exhausted: drain the remaining candidates in order.
        min_val = heapq.heappop(heap)
        res.append(min_val)
    return res
arr = [1, 4, 5, 2, 3, 7, 8, 6, 10, 9]
k = 2
print (sort_k_messed_array(arr, k))
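# Sanity check added for illustration (not part of the original snippet): for
# this input every element sits within k = 2 positions of its sorted slot, so
# the result should equal the fully sorted list.
assert sort_k_messed_array(arr, k) == sorted(arr)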
| [
"[email protected]"
] | |
2fb286b4006fe0afb28353f2573285435e790b8a | 194a1e2ac246c5f9926b014c00d4c733f0cdaf0c | /btcgreen/wallet/did_wallet/did_info.py | 553e00112df673b80a38510c0e18345282c7f978 | [
"Apache-2.0"
] | permissive | chia-os/btcgreen-blockchain | 03e889cd0268284b7673917ab725ad71f980b650 | 2688e74de423ec59df302299e993b4674d69489e | refs/heads/main | 2023-08-29T14:40:11.962821 | 2021-08-17T06:33:34 | 2021-08-17T06:33:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | from dataclasses import dataclass
from typing import List, Optional, Tuple
from btcgreen.types.blockchain_format.sized_bytes import bytes32
from btcgreen.util.ints import uint64
from btcgreen.util.streamable import streamable, Streamable
from btcgreen.wallet.lineage_proof import LineageProof
from btcgreen.types.blockchain_format.program import Program
from btcgreen.types.blockchain_format.coin import Coin
@dataclass(frozen=True)
@streamable
class DIDInfo(Streamable):
origin_coin: Optional[Coin] # puzzlehash of this coin is our DID
backup_ids: List[bytes]
num_of_backup_ids_needed: uint64
parent_info: List[Tuple[bytes32, Optional[LineageProof]]] # {coin.name(): LineageProof}
current_inner: Optional[Program] # represents a Program as bytes
temp_coin: Optional[Coin] # partially recovered wallet uses these to hold info
temp_puzhash: Optional[bytes32]
temp_pubkey: Optional[bytes]
| [
"[email protected]"
] | |
d1f5e089482a623098d9cb6844fe079e8a317992 | 707d67f58b55cae19d3b1431d3c1fb2d5f283800 | /withoutrest/test.py | 7440a0d374410355bceeefda45974cbe4c0cced9 | [] | no_license | lipun111/API | c52954cfd604ee567e4033ba971ca2f2fd8fb48d | 57e66c74ef8e0bb519fe9ba316c1cb2503d901ad | refs/heads/master | 2021-01-06T08:16:05.342444 | 2020-02-23T10:32:49 | 2020-02-23T10:32:49 | 241,257,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | import requests
BASE_URL='http://127.0.0.1:8000/'
ENDPOINT='apijson'
resp = requests.get(BASE_URL+ENDPOINT)
data = resp.json()
print('Data From Django Application:')
print('#'*50)
print('Employee Number:', data['eno'])
print('Employee Name:', data['ename'])
print('Employee Salary:', data['esal'])
print('Employee Address:', data['eaddr'])
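# For context (an assumption, not shown in this snippet): the Django view behind
# the 'apijson' endpoint has to return a JSON object with the keys read above.
# A minimal sketch of such a view could look like this (field values are
# examples only):
#
#     from django.http import JsonResponse
#
#     def emp_json(request):
#         return JsonResponse({'eno': 100, 'ename': 'XYZ',
#                              'esal': 1000.0, 'eaddr': 'Hyderabad'})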
| [
"[email protected]"
] | |
e94e070e67a00687f1e3cae94b5554d4983e1d11 | 10cb11f83e1c8b51b9d72c28d6259a56ff1a97c8 | /samcli/commands/pipeline/init/pipeline_templates_manifest.py | e9729511faa9af0042539a4c326e6ef6b34eece4 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
] | permissive | aws/aws-sam-cli | 6d4411aacf7f861e75e5cf4882a32858797a276d | b297ff015f2b69d7c74059c2d42ece1c29ea73ee | refs/heads/develop | 2023-08-30T23:28:36.179932 | 2023-08-30T21:58:26 | 2023-08-30T21:58:26 | 92,205,085 | 1,402 | 470 | Apache-2.0 | 2023-09-14T21:14:23 | 2017-05-23T18:16:23 | Python | UTF-8 | Python | false | false | 2,398 | py | """
Represents a manifest that lists the available SAM pipeline templates.
Example:
providers:
    - displayName: Jenkins
      id: jenkins
    - displayName: Gitlab CI/CD
      id: gitlab
    - displayName: Github Actions
      id: github-actions
templates:
- displayName: jenkins-two-environments-pipeline
provider: Jenkins
location: templates/cookiecutter-jenkins-two-environments-pipeline
- displayName: gitlab-two-environments-pipeline
provider: Gitlab
location: templates/cookiecutter-gitlab-two-environments-pipeline
- displayName: Github-Actions-two-environments-pipeline
provider: Github Actions
location: templates/cookiecutter-github-actions-two-environments-pipeline
"""
from pathlib import Path
from typing import Dict, List
import yaml
from samcli.commands.exceptions import AppPipelineTemplateManifestException
from samcli.yamlhelper import parse_yaml_file
class Provider:
"""CI/CD system such as Jenkins, Gitlab and GitHub-Actions"""
def __init__(self, manifest: Dict) -> None:
self.id: str = manifest["id"]
self.display_name: str = manifest["displayName"]
class PipelineTemplateMetadata:
"""The metadata of a Given pipeline template"""
def __init__(self, manifest: Dict) -> None:
self.display_name: str = manifest["displayName"]
self.provider: str = manifest["provider"]
self.location: str = manifest["location"]
class PipelineTemplatesManifest:
"""The metadata of the available CI/CD systems and the pipeline templates"""
def __init__(self, manifest_path: Path) -> None:
try:
manifest: Dict = parse_yaml_file(file_path=str(manifest_path))
self.providers: List[Provider] = list(map(Provider, manifest["providers"]))
self.templates: List[PipelineTemplateMetadata] = list(map(PipelineTemplateMetadata, manifest["templates"]))
except (FileNotFoundError, KeyError, TypeError, yaml.YAMLError) as ex:
raise AppPipelineTemplateManifestException(
"SAM pipeline templates manifest file is not found or ill-formatted. This could happen if the file "
f"{manifest_path} got deleted or modified."
"If you believe this is not the case, please file an issue at https://github.com/aws/aws-sam-cli/issues"
) from ex
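# Illustrative usage (a sketch, not part of SAM CLI itself): given a checkout of
# the pipeline templates repository, the manifest could be loaded and inspected
# like this (the path below is hypothetical):
#
#     manifest = PipelineTemplatesManifest(Path("pipeline-templates/manifest.yaml"))
#     for template in manifest.templates:
#         print(template.display_name, template.provider, template.location)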
| [
"[email protected]"
] | |
d2ad9b7101aa89fa36b056759a88442ffeb4d1d3 | 349949e6652378606e60a72263ad8afd32a59259 | /debit_credit_note/wizard/account_invoice_debit.py | 7557809b36c35d21848e4dd7760868df84e45d21 | [] | no_license | Parkash067/custom_ci_addons | 54180c68ab95dd0f0ed49b9caae10f7e2b950425 | 491f8783cc7c1ff98e2648994b9320cefa4c327b | refs/heads/master | 2021-04-12T10:02:18.091495 | 2018-07-22T20:50:09 | 2018-07-22T20:50:09 | 126,322,636 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,468 | py | # coding: utf-8
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class AccountInvoiceDebit(osv.TransientModel):
"""Debits Note from Invoice"""
_name = "account.invoice.debit"
_description = "Invoice Debit Note"
_columns = {
'date': fields.date('Operation date',
help='This date will be used as the invoice date '
'for Refund Invoice and Period will be '
'chosen accordingly!'),
'period': fields.many2one('account.period', 'Force period'),
'journal_id': fields.many2one('account.journal',
'Refund Journal',
help='You can select here the journal '
'to use for the refund invoice '
'that will be created. If you '
'leave that field empty, it will '
'use the same journal as the '
'current invoice.'),
'description': fields.char('Description', size=128, required=True),
'comment': fields.text('Comment', required=True),
}
def _get_journal(self, cr, uid, context=None):
obj_journal = self.pool.get('account.journal')
user_obj = self.pool.get('res.users')
if context is None:
context = {}
inv_type = context.get('type', 'out_invoice')
company_id = user_obj.browse(
cr, uid, uid, context=context).company_id.id
type = (inv_type == 'out_invoice') and 'sale_refund' or \
(inv_type == 'out_refund') and 'sale' or \
(inv_type == 'in_invoice') and 'purchase_refund' or \
(inv_type == 'in_refund') and 'purchase'
journal = obj_journal.search(cr, uid, [('type', '=', type), (
'company_id', '=', company_id)], limit=1, context=context)
return journal and journal[0] or False
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d'),
'journal_id': _get_journal,
}
def fields_view_get(self, cr, uid, view_id=None, view_type=False,
context=None, toolbar=False, submenu=False):
if context is None:
context = {}
journal_obj = self.pool.get('account.journal')
res = super(AccountInvoiceDebit, self).fields_view_get(
cr, uid, view_id=view_id, view_type=view_type, context=context,
toolbar=toolbar, submenu=submenu)
        # Debit note only from customer or purchase invoice
# type = context.get('journal_type', 'sale_refund')
type = context.get('journal_type', 'sale')
if type in ('sale', 'sale_refund'):
type = 'sale'
else:
type = 'purchase'
for field in res['fields']:
if field == 'journal_id':
journal_select = journal_obj._name_search(cr, uid, '', [(
'type', '=', type)], context=context, limit=None,
name_get_uid=1)
res['fields'][field]['selection'] = journal_select
return res
def _get_period(self, cr, uid, context={}):
"""Return default account period value
"""
account_period_obj = self.pool.get('account.period')
ids = account_period_obj.find(cr, uid, context=context)
period_id = False
if ids:
period_id = ids[0]
return period_id
def _get_orig(self, cr, uid, inv, ref, context={}):
"""Return default origin value
"""
nro_ref = ref
if inv.type == 'out_invoice':
nro_ref = inv.number
orig = _('INV:') + (nro_ref or '') + _('- DATE:') + (
inv.date_invoice or '') + (' TOTAL:' + str(inv.amount_total) or '')
return orig
def compute_debit(self, cr, uid, ids, context=None):
"""@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: the account invoice refund’s ID or list of IDs
"""
inv_obj = self.pool.get('account.invoice')
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
inv_tax_obj = self.pool.get('account.invoice.tax')
inv_line_obj = self.pool.get('account.invoice.line')
res_users_obj = self.pool.get('res.users')
if context is None:
context = {}
for form in self.browse(cr, uid, ids, context=context):
created_inv = []
date = False
period = False
description = False
company = res_users_obj.browse(
cr, uid, uid, context=context).company_id
journal_id = form.journal_id.id
for inv in inv_obj.browse(cr, uid, context.get('active_ids'),
context=context):
if inv.state in ['draft', 'proforma2', 'cancel']:
raise osv.except_osv(_('Error !'), _(
'Can not create a debit note from '
'draft/proforma/cancel invoice.'))
if inv.reconciled:
raise osv.except_osv(_('Error!'), _(
                    'Cannot create a debit note for an invoice which is already reconciled, '
'invoice should be unreconciled first. You can only '
'refund this invoice.'))
if form.period.id:
period = form.period.id
else:
# Take period from the current date
# period = inv.period_id and inv.period_id.id or False
period = self._get_period(cr, uid, context)
if not journal_id:
journal_id = inv.journal_id.id
if form.date:
date = form.date
if not form.period.id:
cr.execute("select name from ir_model_fields \
where model = 'account.period' \
and name = 'company_id'")
result_query = cr.fetchone()
if result_query:
# in multi company mode
cr.execute("""select p.id from account_fiscalyear \
y, account_period p where y.id=p.fiscalyear_id \
and date(%s) between p.date_start AND \
p.date_stop and y.company_id = %s limit 1""",
(date, company.id,))
else:
# in mono company mode
cr.execute("""SELECT id
from account_period where date(%s)
between date_start AND date_stop \
limit 1 """, (date,))
res = cr.fetchone()
if res:
period = res[0]
else:
date = inv.date_invoice
if form.description:
description = form.description
else:
description = inv.name
if not period:
raise osv.except_osv(_('Insufficient Data!'),
_('No period found on the invoice.'))
# we get original data of invoice to create a new invoice that
# is the copy of the original
invoice = inv_obj.read(cr, uid, [inv.id],
['name', 'type', 'number', 'reference',
'comment', 'date_due', 'partner_id',
'partner_insite', 'partner_contact',
'partner_ref', 'payment_term',
'account_id', 'currency_id',
'invoice_line', 'tax_line',
'journal_id', 'period_id'],
context=context)
invoice = invoice[0]
del invoice['id']
invoice_lines = inv_line_obj.browse(
cr, uid, invoice['invoice_line'], context=context)
invoice_lines = inv_obj._refund_cleanup_lines(
cr, uid, invoice_lines, context=context)
tax_lines = inv_tax_obj.browse(
cr, uid, invoice['tax_line'], context=context)
tax_lines = inv_obj._refund_cleanup_lines(
cr, uid, tax_lines, context=context)
# Add origin, parent and comment values
orig = self._get_orig(cr, uid, inv, invoice[
'reference'], context)
invoice.update({
'type': inv.type == 'in_invoice' and 'in_refund' or
inv.type == 'out_invoice' and 'out_refund',
'date_invoice': date,
'state': 'draft',
'number': False,
'invoice_line': invoice_lines,
'tax_line': tax_lines,
'period_id': period,
'parent_id': inv.id,
'name': description,
'origin': orig,
'comment': form['comment']
})
# take the id part of the tuple returned for many2one fields
for field in ('partner_id', 'account_id', 'currency_id',
'payment_term', 'journal_id'):
invoice[field] = invoice[field] and invoice[field][0]
# create the new invoice
inv_id = inv_obj.create(cr, uid, invoice, {})
# we compute due date
if inv.payment_term.id:
data = inv_obj.onchange_payment_term_date_invoice(
cr, uid, [inv_id], inv.payment_term.id, date)
if 'value' in data and data['value']:
inv_obj.write(cr, uid, [inv_id], data['value'])
created_inv.append(inv_id)
# we get the view id
xml_id = (inv.type == 'out_refund') and 'action_invoice_tree1' or \
(inv.type == 'in_refund') and 'action_invoice_tree2' or \
(inv.type == 'out_invoice') and 'action_invoice_tree3' or \
(inv.type == 'in_invoice') and 'action_invoice_tree4'
# we get the model
result = mod_obj.get_object_reference(cr, uid, 'account', xml_id)
id = result and result[1] or False
# we read the act window
result = act_obj.read(cr, uid, id, context=context)
# we add the new invoices into domain list
invoice_domain = eval(result['domain'])
invoice_domain.append(('id', 'in', created_inv))
result['domain'] = invoice_domain
return result
def invoice_debit(self, cr, uid, ids, context=None):
return self.compute_debit(cr, uid, ids, context=context)
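# Worked example (illustration only, values are hypothetical): for a customer
# invoice numbered "INV/001" dated 2018-01-01 with an amount_total of 150.0,
# _get_orig above builds the origin string
# "INV:INV/001- DATE:2018-01-01 TOTAL:150.0" for the generated debit note.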
| [
"[email protected]"
] | |
ce7e0ede156a3e7de7e03da065961056d48b7863 | 148d623f5c85e8bb4cb736c1f20dfa6c15108ee0 | /notebooks/cross_validation_ex_01.py | 170688985da17b8a32c3299bb15f65b29d117fe5 | [
"CC-BY-4.0"
] | permissive | castorfou/scikit-learn-mooc | 996cec1a8c681c4fb87c568b2fa62d9ce57518f9 | 235748eff57409eb17d8355024579c6df44c0563 | refs/heads/master | 2023-06-02T16:31:26.987200 | 2021-06-24T09:54:56 | 2021-06-24T09:54:56 | 369,466,156 | 1 | 0 | CC-BY-4.0 | 2021-05-21T08:24:07 | 2021-05-21T08:24:06 | null | UTF-8 | Python | false | false | 6,263 | py | #!/usr/bin/env python
# coding: utf-8
# # 📝 Exercise M2.01
#
# The aim of this exercise is to make the following experiments:
#
# * train and test a support vector machine classifier through
# cross-validation;
# * study the effect of the parameter gamma of this classifier using a
# validation curve;
# * study whether it would be useful, in terms of classification, to add new
#   samples to the dataset, using a learning curve.
#
# To make these experiments we will first load the blood transfusion dataset.
# <div class="admonition note alert alert-info">
# <p class="first admonition-title" style="font-weight: bold;">Note</p>
# <p class="last">If you want a deeper overview regarding this dataset, you can refer to the
# Appendix - Datasets description section at the end of this MOOC.</p>
# </div>
# In[14]:
import pandas as pd
blood_transfusion = pd.read_csv("../datasets/blood_transfusion.csv")
data = blood_transfusion.drop(columns="Class")
target = blood_transfusion["Class"]
# We will use a support vector machine classifier (SVM). In its simplest
# form, an SVM classifier is a linear classifier behaving similarly to a
# logistic regression. The optimization used to find the optimal
# weights of the linear model is different, but we don't need to know these
# details for the exercise.
#
# Also, this classifier can become more flexible/expressive by using a
# so-called kernel, which makes the model non-linear. Again, no knowledge of
# the mathematics is required to accomplish this exercise.
#
# We will use an RBF kernel where a parameter `gamma` allows to tune the
# flexibility of the model.
#
# First let's create a predictive pipeline made of:
#
# * a [`sklearn.preprocessing.StandardScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)
# with default parameter;
# * a [`sklearn.svm.SVC`](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html)
# where the parameter `kernel` could be set to `"rbf"`. Note that this is the
# default.
# In[15]:
# to display nice model diagram
from sklearn import set_config
set_config(display='diagram')
# In[16]:
# Write your code here.
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
model = make_pipeline(StandardScaler(), SVC(kernel='rbf'))
model
# Evaluate the statistical performance of your model by cross-validation with a
# `ShuffleSplit` scheme. Thus, you can use
# [`sklearn.model_selection.cross_validate`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html)
# and pass a [`sklearn.model_selection.ShuffleSplit`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html)
# to the `cv` parameter. Only fix the `random_state=0` in the `ShuffleSplit`
# and leave the other parameters at their defaults.
# In[17]:
# Write your code here.
import pandas as pd
from sklearn.model_selection import cross_validate, ShuffleSplit
cv = ShuffleSplit(random_state=0)
cv_results = cross_validate(model, data, target,
cv=cv)
cv_results = pd.DataFrame(cv_results)
cv_results
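# A quick summary of the cross-validated accuracy (added for illustration;
# `test_score` is the standard column returned by `cross_validate`):
print(f"Accuracy: {cv_results['test_score'].mean():.3f} "
      f"+/- {cv_results['test_score'].std():.3f}")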
# As previously mentioned, the parameter `gamma` is one of the parameters
# controlling under/over-fitting in a support vector machine with an RBF kernel.
#
# Compute the validation curve
# (using [`sklearn.model_selection.validation_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.validation_curve.html))
# to evaluate the effect of the parameter `gamma`. You can vary its value
# between `1e-3` and `1e2` by generating samples on a logarithmic scale.
# Thus, you can use `np.logspace(-3, 2, num=30)`.
#
# Since we are manipulating a `Pipeline` the parameter name will be set to
# `svc__gamma` instead of only `gamma`. You can retrieve the parameter name
# using `model.get_params().keys()`. We will go more into details regarding
# accessing and setting hyperparameter in the next section.
# In[18]:
# Write your code here.
from sklearn.model_selection import validation_curve
import numpy as np
gamma = np.logspace(-3, 2, num=30)
train_scores, test_scores = validation_curve(
model, data, target, param_name="svc__gamma", param_range=gamma,
cv=cv)
train_errors, test_errors = -train_scores, -test_scores
# Plot the validation curve for the train and test scores.
# In[22]:
# Write your code here.
import matplotlib.pyplot as plt
plt.errorbar(gamma, train_scores.mean(axis=1),
             yerr=train_scores.std(axis=1), label="Training score")
plt.errorbar(gamma, test_scores.mean(axis=1),
             yerr=test_scores.std(axis=1), label="Testing score")
plt.legend()
plt.xscale("log")
plt.xlabel("Gamma value for SVC")
plt.ylabel("Accuracy score")
_ = plt.title("Validation curve for SVC")
# Now, you can perform an analysis to check whether adding new samples to the
# dataset could help our model to better generalize. Compute the learning curve
# (using [`sklearn.model_selection.learning_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.learning_curve.html))
# by computing the train and test scores for different training dataset size.
# Plot the train and test scores with respect to the number of samples.
# In[12]:
# Write your code here.
from sklearn.model_selection import learning_curve
import numpy as np
train_sizes = np.linspace(0.1, 1.0, num=5, endpoint=True)
train_sizes
from sklearn.model_selection import ShuffleSplit
cv = ShuffleSplit(n_splits=30, test_size=0.2)
results = learning_curve(
model, data, target, train_sizes=train_sizes, cv=cv)
train_size, train_scores, test_scores = results[:3]
import matplotlib.pyplot as plt
plt.errorbar(train_size, train_scores.mean(axis=1),
             yerr=train_scores.std(axis=1), label="Training score")
plt.errorbar(train_size, test_scores.mean(axis=1),
             yerr=test_scores.std(axis=1), label="Testing score")
plt.legend()
plt.xscale("log")
plt.xlabel("Number of samples in the training set")
plt.ylabel("Accuracy score")
_ = plt.title("Learning curve for SVC")
# In[ ]:
| [
"[email protected]"
] | |
068373acc18149e0bd38c83af1aff1cc0290a92b | 519ffdab70c5cddc81da66af4c3ddd4007e637bc | /src/midterm/exam.py | 0dae271ee20bc47d14f446ea449af8dc42852649 | [
"MIT"
] | permissive | acc-cosc-1336/cosc-1336-spring-2018-Aronpond | 6b55ce38943117015a108b8a4544a22346eef670 | b37a6be8c0b909859ccf5ac2ce5eaf82c4ba741b | refs/heads/master | 2021-05-11T09:14:22.013454 | 2018-05-09T22:27:24 | 2018-05-09T22:27:24 | 118,071,080 | 0 | 0 | MIT | 2018-05-07T06:09:48 | 2018-01-19T03:22:41 | Python | UTF-8 | Python | false | false | 3,079 | py | '''
5 points
Create a function named get_miles_per_hour with parameters kilometers and minutes that returns
the miles per hour.
Use .621371 as conversion ratio.
Return the string error 'Invalid arguments' if negative kilometers or minutes are given.
RUN THE PROVIDED TEST CASES TO VALIDATE YOUR CODE
'''
def get_miles_per_hour(kilometers,minutes):
hours = minutes / 60
miles = kilometers * .621371
mph = miles / hours
if kilometers < 0:
return 'Invalid arguments'
if minutes < 0:
return 'Invalid arguments'
return mph
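# Worked example (illustration only): 100 km covered in 60 minutes is
# 100 * .621371 = 62.1371 miles in 1 hour, so get_miles_per_hour(100, 60)
# returns 62.1371.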
'''
10 points
Create a function named get_bonus_pay_amount with parameter sales that returns the bonus pay amount.
Sales Range Percentage
0 to 499 5%
500 to 999 6%
1000 to 1499 7%
1500 to 1999 8%
Return the string error 'Invalid arguments' if sales amount less than 0 or greater than 1999
Sample Data sales amount:
1000
Return Value:
70
'''
def get_bonus_pay_amount(sales):
if sales >= 0 and sales <= 499:
return (round(sales *.05,2))
elif sales >= 500 and sales <= 999:
return (round(sales *.06,2))
elif sales >= 1000 and sales <= 1499:
return (round(sales *.07,2))
elif sales >= 1500 and sales <= 1999:
return (round(sales *.08,2))
elif sales < 0:
return 'Invalid arguments'
elif sales > 1999:
return 'Invalid arguments'
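# Additional worked example (illustration only): sales of 250 fall in the
# 0 to 499 bracket, so get_bonus_pay_amount(250) returns round(250 * .05, 2) == 12.5.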
'''
10 points
Create a function named reverse_string that has one parameter named string1 that returns the
reverse of the string.
MUST USE A WHILE LOOP
DO NOT USE STRING SLICING!!!
Sample Data string1 argument:
My String Data
Returns:
ataD gnirtS yM
reverse_string('My String Data')
CREATE A TEST CASE IN THE exam_test.py file.
'''
def reverse_string(string1):
# return string1[::-1]
rev = ''
i = len(string1)
while i > 0:
rev += string1[i-1]
i = i - 1
return rev
'''
10 points
Create a function named get_list_min_max with a list1 parameter that returns the min and max values
in a list.
Sample Data list1 value:
['joe', 10, 15, 20, 30, 40]
Returns:
[10, 40]
get_list_min_max(['joe', 10, 15, 20, 30, 40])
CREATE A TEST CASE IN THE exam_test.py file.
'''
def get_list_min_max(list1):
rl = []
maxv = max(list1[1:])
minv = min(list1[1:])
rl.append(minv)
rl.append(maxv)
return rl
'''
25 points
Create a function named get_list_min_max_file with no parameters that reads the attached quiz.dat file
that returns all the min and max values from multiple lists.
You can use the get_list_min_max to get the min max for each list.
Sample quiz.dat file data:
joe 10 15 20 30 40
bill 23 16 19 22
sue 8 22 17 14 32 17 24 21 2 9 11 17
grace 12 28 21 45 26 10 11
john 14 32 25 16 89
Return Value:
[2,89]
'''
def get_list_min_max_file():
    # Read quiz.dat, take each line's min/max with get_list_min_max, and return
    # the overall [min, max] across all lines (e.g. [2, 89] for the sample data
    # shown in the docstring).
    infile = open('quiz.dat', 'r')
    lines = infile.readlines()
    infile.close()
    mins = []
    maxes = []
    for line in lines:
        parts = line.split()
        if not parts:
            continue
        row = [parts[0]] + [int(value) for value in parts[1:]]
        low, high = get_list_min_max(row)
        mins.append(low)
        maxes.append(high)
    return [min(mins), max(maxes)]
| [
"[email protected]"
] | |
23f9757ad8e9bebd6108e84a02155e77f6b93f00 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /secretsmanager_write_1/secret_create.py | 7e5010bb0e9275c9dd9b04a37ca96724ca1518c4 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,892 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/secretsmanager/create-secret.html
if __name__ == '__main__':
"""
delete-secret : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/secretsmanager/delete-secret.html
describe-secret : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/secretsmanager/describe-secret.html
list-secrets : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/secretsmanager/list-secrets.html
restore-secret : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/secretsmanager/restore-secret.html
rotate-secret : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/secretsmanager/rotate-secret.html
update-secret : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/secretsmanager/update-secret.html
"""
parameter_display_string = """
# name : Specifies the friendly name of the new secret.
The secret name must be ASCII letters, digits, or the following characters : /_+=.@-
Note
Do not end your secret name with a hyphen followed by six characters. If you do so, you risk confusion and unexpected results when searching for a secret by partial ARN. Secrets Manager automatically adds a hyphen and six random characters at the end of the ARN.
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("secretsmanager", "create-secret", "name", add_option_dict)
| [
"[email protected]"
] | |
5a164ae6608208666333aadb44adb89d47eb77e1 | e97e727972149063b3a1e56b38961d0f2f30ed95 | /test/test_activity_occurrence_resource.py | 95329d1bb8176f1da196bd830cac7f467f098361 | [] | no_license | knetikmedia/knetikcloud-python-client | f3a485f21c6f3e733a864194c9acf048943dece7 | 834a24415385c906732437970db105e1bc71bde4 | refs/heads/master | 2021-01-12T10:23:35.307479 | 2018-03-14T16:04:24 | 2018-03-14T16:04:24 | 76,418,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | # coding: utf-8
"""
Knetik Platform API Documentation latest
This is the spec for the Knetik API. Use this in conjunction with the documentation found at https://knetikcloud.com.
OpenAPI spec version: latest
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import knetik_cloud
from knetik_cloud.rest import ApiException
from knetik_cloud.models.activity_occurrence_resource import ActivityOccurrenceResource
class TestActivityOccurrenceResource(unittest.TestCase):
""" ActivityOccurrenceResource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testActivityOccurrenceResource(self):
"""
Test ActivityOccurrenceResource
"""
# FIXME: construct object with mandatory attributes with example values
#model = knetik_cloud.models.activity_occurrence_resource.ActivityOccurrenceResource()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
282cd019ef9be186971ea4b2b29c2b727caf3c2f | ed454f31cf5a3d2605f275cc83ec82f34f06bb33 | /zerver/lib/events.py | d925e7e4d05427eeb4baa0c1793af046fdc88d8f | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | 18-2-SKKU-OSS/2018-2-OSS-L5 | b62a3ce53eff63ed09395dc1f8296fef089d90e2 | 190bc3afbf973d5917e82ad9785d01b2ea1773f2 | refs/heads/master | 2020-04-08T11:44:14.468373 | 2018-12-11T04:35:30 | 2018-12-11T04:35:30 | 159,317,980 | 3 | 4 | Apache-2.0 | 2018-12-09T14:14:21 | 2018-11-27T10:30:18 | Python | UTF-8 | Python | false | false | 34,269 | py | # See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
import copy
import ujson
from collections import defaultdict
from django.utils.translation import ugettext as _
from django.conf import settings
from importlib import import_module
from typing import (
cast, Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union
)
session_engine = import_module(settings.SESSION_ENGINE)
from zerver.lib.alert_words import user_alert_words
from zerver.lib.attachments import user_attachments
from zerver.lib.avatar import avatar_url, get_avatar_field
from zerver.lib.bot_config import load_bot_config_template
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.integrations import EMBEDDED_BOTS
from zerver.lib.message import (
aggregate_unread_data,
apply_unread_message_event,
get_raw_unread_data,
get_starred_message_ids,
)
from zerver.lib.narrow import check_supported_events_narrow_filter
from zerver.lib.push_notifications import push_notifications_enabled
from zerver.lib.soft_deactivation import maybe_catch_up_soft_deactivated_user
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.request import JsonableError
from zerver.lib.topic import TOPIC_NAME
from zerver.lib.topic_mutes import get_topic_mutes
from zerver.lib.actions import (
validate_user_access_to_subscribers_helper,
do_get_streams, get_default_streams_for_realm,
gather_subscriptions_helper, get_cross_realm_dicts,
get_status_dict, streams_to_dicts_sorted,
default_stream_groups_to_dicts_sorted,
get_owned_bot_dicts,
)
from zerver.lib.user_groups import user_groups_in_realm_serialized
from zerver.tornado.event_queue import request_event_queue, get_user_events
from zerver.models import Client, Message, Realm, UserPresence, UserProfile, CustomProfileFieldValue, \
get_user_profile_by_id, \
get_realm_user_dicts, realm_filters_for_realm, get_user,\
custom_profile_fields_for_realm, get_realm_domains, \
get_default_stream_groups, CustomProfileField, Stream
from zproject.backends import email_auth_enabled, password_auth_enabled
from version import ZULIP_VERSION
def get_raw_user_data(realm_id: int, client_gravatar: bool) -> Dict[int, Dict[str, str]]:
user_dicts = get_realm_user_dicts(realm_id)
# TODO: Consider optimizing this query away with caching.
custom_profile_field_values = CustomProfileFieldValue.objects.filter(user_profile__realm_id=realm_id)
profiles_by_user_id = defaultdict(dict) # type: Dict[int, Dict[str, Any]]
for profile_field in custom_profile_field_values:
user_id = profile_field.user_profile_id
profiles_by_user_id[user_id][profile_field.field_id] = profile_field.value
def user_data(row: Dict[str, Any]) -> Dict[str, Any]:
avatar_url = get_avatar_field(
user_id=row['id'],
realm_id= realm_id,
email=row['email'],
avatar_source=row['avatar_source'],
avatar_version=row['avatar_version'],
medium=False,
client_gravatar=client_gravatar,
)
is_admin = row['is_realm_admin']
is_guest = row['is_guest']
is_bot = row['is_bot']
# This format should align with get_cross_realm_dicts() and notify_created_user
result = dict(
email=row['email'],
user_id=row['id'],
avatar_url=avatar_url,
is_admin=is_admin,
is_guest=is_guest,
is_bot=is_bot,
full_name=row['full_name'],
timezone=row['timezone'],
is_active = row['is_active'],
date_joined = row['date_joined'].isoformat(),
)
if not is_bot:
result['profile_data'] = profiles_by_user_id.get(row['id'], {})
return result
return {
row['id']: user_data(row)
for row in user_dicts
}
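# (Added note) The mapping returned above is keyed by user id, roughly:
#     {42: {'email': ..., 'user_id': 42, 'avatar_url': ..., 'is_admin': ...,
#           'is_guest': ..., 'is_bot': ..., 'full_name': ..., 'timezone': ...,
#           'is_active': ..., 'date_joined': ..., 'profile_data': {...}}}
# where 'profile_data' is only present for non-bot users.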
def always_want(msg_type: str) -> bool:
'''
This function is used as a helper in
fetch_initial_state_data, when the user passes
in None for event_types, and we want to fetch
info for every event type. Defining this at module
level makes it easier to mock.
'''
return True
# Fetch initial data. When event_types is not specified, clients want
# all event types. Whenever you add new code to this function, you
# should also add corresponding events for changes in the data
# structures and new code to apply_events (and add a test in EventsRegisterTest).
def fetch_initial_state_data(user_profile: UserProfile,
event_types: Optional[Iterable[str]],
queue_id: str, client_gravatar: bool,
include_subscribers: bool = True) -> Dict[str, Any]:
state = {'queue_id': queue_id} # type: Dict[str, Any]
realm = user_profile.realm
if event_types is None:
# return True always
want = always_want # type: Callable[[str], bool]
else:
want = set(event_types).__contains__
if want('alert_words'):
state['alert_words'] = user_alert_words(user_profile)
if want('custom_profile_fields'):
fields = custom_profile_fields_for_realm(realm.id)
state['custom_profile_fields'] = [f.as_dict() for f in fields]
state['custom_profile_field_types'] = CustomProfileField.FIELD_TYPE_CHOICES_DICT
if want('hotspots'):
state['hotspots'] = get_next_hotspots(user_profile)
if want('message'):
# The client should use get_messages() to fetch messages
# starting with the max_message_id. They will get messages
# newer than that ID via get_events()
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
state['max_message_id'] = messages[0].id
else:
state['max_message_id'] = -1
if want('muted_topics'):
state['muted_topics'] = get_topic_mutes(user_profile)
if want('pointer'):
state['pointer'] = user_profile.pointer
if want('presence'):
state['presences'] = get_status_dict(user_profile)
if want('realm'):
for property_name in Realm.property_types:
state['realm_' + property_name] = getattr(realm, property_name)
# Most state is handled via the property_types framework;
# these manual entries are for those realm settings that don't
# fit into that framework.
state['realm_authentication_methods'] = realm.authentication_methods_dict()
state['realm_allow_message_editing'] = realm.allow_message_editing
state['realm_allow_community_topic_editing'] = realm.allow_community_topic_editing
state['realm_allow_message_deleting'] = realm.allow_message_deleting
state['realm_message_content_edit_limit_seconds'] = realm.message_content_edit_limit_seconds
state['realm_message_content_delete_limit_seconds'] = realm.message_content_delete_limit_seconds
state['realm_icon_url'] = realm_icon_url(realm)
state['realm_icon_source'] = realm.icon_source
state['max_icon_file_size'] = settings.MAX_ICON_FILE_SIZE
state['realm_bot_domain'] = realm.get_bot_domain()
state['realm_uri'] = realm.uri
state['realm_available_video_chat_providers'] = realm.VIDEO_CHAT_PROVIDERS
state['realm_presence_disabled'] = realm.presence_disabled
state['realm_digest_emails_enabled'] = realm.digest_emails_enabled and settings.SEND_DIGEST_EMAILS
state['realm_is_zephyr_mirror_realm'] = realm.is_zephyr_mirror_realm
state['realm_email_auth_enabled'] = email_auth_enabled(realm)
state['realm_password_auth_enabled'] = password_auth_enabled(realm)
state['realm_push_notifications_enabled'] = push_notifications_enabled()
if realm.notifications_stream and not realm.notifications_stream.deactivated:
notifications_stream = realm.notifications_stream
state['realm_notifications_stream_id'] = notifications_stream.id
else:
state['realm_notifications_stream_id'] = -1
signup_notifications_stream = realm.get_signup_notifications_stream()
if signup_notifications_stream:
state['realm_signup_notifications_stream_id'] = signup_notifications_stream.id
else:
state['realm_signup_notifications_stream_id'] = -1
if want('realm_domains'):
state['realm_domains'] = get_realm_domains(realm)
if want('realm_emoji'):
state['realm_emoji'] = realm.get_emoji()
if want('realm_filters'):
state['realm_filters'] = realm_filters_for_realm(realm.id)
if want('realm_user_groups'):
state['realm_user_groups'] = user_groups_in_realm_serialized(realm)
if want('realm_user'):
state['raw_users'] = get_raw_user_data(
realm_id=realm.id,
client_gravatar=client_gravatar,
)
# For the user's own avatar URL, we force
# client_gravatar=False, since that saves some unnecessary
# client-side code for handing medium-size avatars. See #8253
# for details.
state['avatar_source'] = user_profile.avatar_source
state['avatar_url_medium'] = avatar_url(
user_profile,
medium=True,
client_gravatar=False,
)
state['avatar_url'] = avatar_url(
user_profile,
medium=False,
client_gravatar=False,
)
state['can_create_streams'] = user_profile.can_create_streams()
state['can_subscribe_other_users'] = user_profile.can_subscribe_other_users()
state['cross_realm_bots'] = list(get_cross_realm_dicts())
state['is_admin'] = user_profile.is_realm_admin
state['is_guest'] = user_profile.is_guest
state['user_id'] = user_profile.id
state['enter_sends'] = user_profile.enter_sends
state['email'] = user_profile.email
state['delivery_email'] = user_profile.delivery_email
state['full_name'] = user_profile.full_name
if want('realm_bot'):
state['realm_bots'] = get_owned_bot_dicts(user_profile)
# This does not yet have an apply_event counterpart, since currently,
# new entries for EMBEDDED_BOTS can only be added directly in the codebase.
if want('realm_embedded_bots'):
realm_embedded_bots = []
for bot in EMBEDDED_BOTS:
realm_embedded_bots.append({'name': bot.name,
'config': load_bot_config_template(bot.name)})
state['realm_embedded_bots'] = realm_embedded_bots
if want('subscription'):
subscriptions, unsubscribed, never_subscribed = gather_subscriptions_helper(
user_profile, include_subscribers=include_subscribers)
state['subscriptions'] = subscriptions
state['unsubscribed'] = unsubscribed
state['never_subscribed'] = never_subscribed
if want('update_message_flags') and want('message'):
# Keeping unread_msgs updated requires both message flag updates and
# message updates. This is due to the fact that new messages will not
# generate a flag update so we need to use the flags field in the
# message event.
state['raw_unread_msgs'] = get_raw_unread_data(user_profile)
if want('starred_messages'):
state['starred_messages'] = get_starred_message_ids(user_profile)
if want('stream'):
state['streams'] = do_get_streams(user_profile)
state['stream_name_max_length'] = Stream.MAX_NAME_LENGTH
state['stream_description_max_length'] = Stream.MAX_DESCRIPTION_LENGTH
if want('default_streams'):
state['realm_default_streams'] = streams_to_dicts_sorted(
get_default_streams_for_realm(realm.id))
if want('default_stream_groups'):
state['realm_default_stream_groups'] = default_stream_groups_to_dicts_sorted(
get_default_stream_groups(realm))
if want('update_display_settings'):
for prop in UserProfile.property_types:
state[prop] = getattr(user_profile, prop)
state['emojiset_choices'] = user_profile.emojiset_choices()
if want('update_global_notifications'):
for notification in UserProfile.notification_setting_types:
state[notification] = getattr(user_profile, notification)
if want('zulip_version'):
state['zulip_version'] = ZULIP_VERSION
return state
def remove_message_id_from_unread_mgs(state: Dict[str, Dict[str, Any]],
message_id: int) -> None:
raw_unread = state['raw_unread_msgs']
for key in ['pm_dict', 'stream_dict', 'huddle_dict']:
raw_unread[key].pop(message_id, None)
raw_unread['unmuted_stream_msgs'].discard(message_id)
raw_unread['mentions'].discard(message_id)
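# For orientation (shape inferred from the keys touched above, not a formal
# spec): state['raw_unread_msgs'] looks roughly like
#     {'pm_dict': {message_id: {...}}, 'stream_dict': {message_id: {...}},
#      'huddle_dict': {message_id: {...}}, 'unmuted_stream_msgs': {message_id, ...},
#      'mentions': {message_id, ...}}
# so removing a message id means popping it from the three dicts and discarding
# it from the two sets.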
def apply_events(state: Dict[str, Any], events: Iterable[Dict[str, Any]],
user_profile: UserProfile, client_gravatar: bool,
include_subscribers: bool = True,
fetch_event_types: Optional[Iterable[str]] = None) -> None:
for event in events:
if fetch_event_types is not None and event['type'] not in fetch_event_types:
# TODO: continuing here is not, most precisely, correct.
# In theory, an event of one type, e.g. `realm_user`,
# could modify state that doesn't come from that
# `fetch_event_types` value, e.g. the `our_person` part of
# that code path. But it should be extremely rare, and
# fixing that will require a nontrivial refactor of
# `apply_event`. For now, be careful in your choice of
# `fetch_event_types`.
continue
apply_event(state, event, user_profile, client_gravatar, include_subscribers)
def apply_event(state: Dict[str, Any],
event: Dict[str, Any],
user_profile: UserProfile,
client_gravatar: bool,
include_subscribers: bool) -> None:
if event['type'] == "message":
state['max_message_id'] = max(state['max_message_id'], event['message']['id'])
if 'raw_unread_msgs' in state:
apply_unread_message_event(
user_profile,
state['raw_unread_msgs'],
event['message'],
event['flags'],
)
elif event['type'] == "hotspots":
state['hotspots'] = event['hotspots']
elif event['type'] == "custom_profile_fields":
state['custom_profile_fields'] = event['fields']
elif event['type'] == "pointer":
state['pointer'] = max(state['pointer'], event['pointer'])
elif event['type'] == "realm_user":
person = event['person']
person_user_id = person['user_id']
if event['op'] == "add":
person = copy.deepcopy(person)
if client_gravatar:
if 'gravatar.com' in person['avatar_url']:
person['avatar_url'] = None
person['is_active'] = True
if not person['is_bot']:
person['profile_data'] = {}
state['raw_users'][person_user_id] = person
elif event['op'] == "remove":
state['raw_users'][person_user_id]['is_active'] = False
elif event['op'] == 'update':
is_me = (person_user_id == user_profile.id)
if is_me:
if ('avatar_url' in person and 'avatar_url' in state):
state['avatar_source'] = person['avatar_source']
state['avatar_url'] = person['avatar_url']
state['avatar_url_medium'] = person['avatar_url_medium']
for field in ['is_admin', 'email', 'full_name']:
if field in person and field in state:
state[field] = person[field]
# In the unlikely event that the current user
# just changed to/from being an admin, we need
# to add/remove the data on all bots in the
# realm. This is ugly and probably better
# solved by removing the all-realm-bots data
# given to admin users from this flow.
if ('is_admin' in person and 'realm_bots' in state):
prev_state = state['raw_users'][user_profile.id]
was_admin = prev_state['is_admin']
now_admin = person['is_admin']
if was_admin and not now_admin:
state['realm_bots'] = []
if not was_admin and now_admin:
state['realm_bots'] = get_owned_bot_dicts(user_profile)
if client_gravatar and 'avatar_url' in person:
# Respect the client_gravatar setting in the `users` data.
if 'gravatar.com' in person['avatar_url']:
person['avatar_url'] = None
person['avatar_url_medium'] = None
if person_user_id in state['raw_users']:
p = state['raw_users'][person_user_id]
for field in p:
if field in person:
p[field] = person[field]
if 'custom_profile_field' in person:
custom_field_id = person['custom_profile_field']['id']
custom_field_new_value = person['custom_profile_field']['value']
p['profile_data'][custom_field_id] = custom_field_new_value
elif event['type'] == 'realm_bot':
if event['op'] == 'add':
state['realm_bots'].append(event['bot'])
if event['op'] == 'remove':
email = event['bot']['email']
for bot in state['realm_bots']:
if bot['email'] == email:
bot['is_active'] = False
if event['op'] == 'delete':
state['realm_bots'] = [item for item
in state['realm_bots'] if item['email'] != event['bot']['email']]
if event['op'] == 'update':
for bot in state['realm_bots']:
if bot['email'] == event['bot']['email']:
if 'owner_id' in event['bot']:
bot['owner'] = get_user_profile_by_id(event['bot']['owner_id']).email
else:
bot.update(event['bot'])
elif event['type'] == 'stream':
if event['op'] == 'create':
for stream in event['streams']:
if not stream['invite_only']:
stream_data = copy.deepcopy(stream)
if include_subscribers:
stream_data['subscribers'] = []
stream_data['stream_weekly_traffic'] = None
stream_data['is_old_stream'] = False
stream_data['is_announcement_only'] = False
# Add stream to never_subscribed (if not invite_only)
state['never_subscribed'].append(stream_data)
state['streams'].append(stream)
state['streams'].sort(key=lambda elt: elt["name"])
if event['op'] == 'delete':
deleted_stream_ids = {stream['stream_id'] for stream in event['streams']}
state['streams'] = [s for s in state['streams'] if s['stream_id'] not in deleted_stream_ids]
state['never_subscribed'] = [stream for stream in state['never_subscribed'] if
stream['stream_id'] not in deleted_stream_ids]
if event['op'] == 'update':
# For legacy reasons, we call stream data 'subscriptions' in
# the state var here, for the benefit of the JS code.
for obj in state['subscriptions']:
if obj['name'].lower() == event['name'].lower():
obj[event['property']] = event['value']
# Also update the pure streams data
for stream in state['streams']:
if stream['name'].lower() == event['name'].lower():
prop = event['property']
if prop in stream:
stream[prop] = event['value']
elif event['op'] == "occupy":
state['streams'] += event['streams']
elif event['op'] == "vacate":
stream_ids = [s["stream_id"] for s in event['streams']]
state['streams'] = [s for s in state['streams'] if s["stream_id"] not in stream_ids]
elif event['type'] == 'default_streams':
state['realm_default_streams'] = event['default_streams']
elif event['type'] == 'default_stream_groups':
state['realm_default_stream_groups'] = event['default_stream_groups']
elif event['type'] == 'realm':
if event['op'] == "update":
field = 'realm_' + event['property']
state[field] = event['value']
# Tricky interaction: Whether we can create streams can get changed here.
if (field in ['realm_create_stream_by_admins_only',
'realm_waiting_period_threshold']) and 'can_create_streams' in state:
state['can_create_streams'] = user_profile.can_create_streams()
state['can_subscribe_other_users'] = user_profile.can_subscribe_other_users()
elif event['op'] == "update_dict":
for key, value in event['data'].items():
state['realm_' + key] = value
# It's a bit messy, but this is where we need to
# update the state for whether password authentication
# is enabled on this server.
if key == 'authentication_methods':
state['realm_password_auth_enabled'] = (value['Email'] or value['LDAP'])
state['realm_email_auth_enabled'] = value['Email']
elif event['type'] == "subscription":
if not include_subscribers and event['op'] in ['peer_add', 'peer_remove']:
return
if event['op'] in ["add"]:
if not include_subscribers:
# Avoid letting 'subscribers' entries end up in the list
for i, sub in enumerate(event['subscriptions']):
event['subscriptions'][i] = copy.deepcopy(event['subscriptions'][i])
del event['subscriptions'][i]['subscribers']
def name(sub: Dict[str, Any]) -> str:
return sub['name'].lower()
if event['op'] == "add":
added_names = set(map(name, event["subscriptions"]))
was_added = lambda s: name(s) in added_names
# add the new subscriptions
state['subscriptions'] += event['subscriptions']
# remove them from unsubscribed if they had been there
state['unsubscribed'] = [s for s in state['unsubscribed'] if not was_added(s)]
# remove them from never_subscribed if they had been there
state['never_subscribed'] = [s for s in state['never_subscribed'] if not was_added(s)]
elif event['op'] == "remove":
removed_names = set(map(name, event["subscriptions"]))
was_removed = lambda s: name(s) in removed_names
# Find the subs we are affecting.
removed_subs = list(filter(was_removed, state['subscriptions']))
# Remove our user from the subscribers of the removed subscriptions.
if include_subscribers:
for sub in removed_subs:
sub['subscribers'] = [id for id in sub['subscribers'] if id != user_profile.id]
# We must effectively copy the removed subscriptions from subscriptions to
# unsubscribe, since we only have the name in our data structure.
state['unsubscribed'] += removed_subs
# Now filter out the removed subscriptions from subscriptions.
state['subscriptions'] = [s for s in state['subscriptions'] if not was_removed(s)]
elif event['op'] == 'update':
for sub in state['subscriptions']:
if sub['name'].lower() == event['name'].lower():
sub[event['property']] = event['value']
elif event['op'] == 'peer_add':
user_id = event['user_id']
for sub in state['subscriptions']:
if (sub['name'] in event['subscriptions'] and
user_id not in sub['subscribers']):
sub['subscribers'].append(user_id)
for sub in state['never_subscribed']:
if (sub['name'] in event['subscriptions'] and
user_id not in sub['subscribers']):
sub['subscribers'].append(user_id)
elif event['op'] == 'peer_remove':
user_id = event['user_id']
for sub in state['subscriptions']:
if (sub['name'] in event['subscriptions'] and
user_id in sub['subscribers']):
sub['subscribers'].remove(user_id)
elif event['type'] == "presence":
# TODO: Add user_id to presence update events / state format!
presence_user_profile = get_user(event['email'], user_profile.realm)
state['presences'][event['email']] = UserPresence.get_status_dict_by_user(
presence_user_profile)[event['email']]
elif event['type'] == "update_message":
# We don't return messages in /register, so we don't need to
# do anything for content updates, but we may need to update
# the unread_msgs data if the topic of an unread message changed.
if TOPIC_NAME in event:
stream_dict = state['raw_unread_msgs']['stream_dict']
topic = event[TOPIC_NAME]
for message_id in event['message_ids']:
if message_id in stream_dict:
stream_dict[message_id]['topic'] = topic
elif event['type'] == "delete_message":
max_message = Message.objects.filter(
usermessage__user_profile=user_profile).order_by('-id').first()
if max_message:
state['max_message_id'] = max_message.id
else:
state['max_message_id'] = -1
remove_id = event['message_id']
remove_message_id_from_unread_mgs(state, remove_id)
elif event['type'] == "reaction":
# The client will get the message with the reactions directly
pass
elif event['type'] == "submessage":
# The client will get submessages with their messages
pass
elif event['type'] == 'typing':
# Typing notification events are transient and thus ignored
pass
elif event['type'] == "attachment":
# Attachment events are just for updating the "uploads" UI;
# they are not sent directly.
pass
elif event['type'] == "update_message_flags":
# We don't return messages in `/register`, so most flags we
# can ignore, but we do need to update the unread_msgs data if
# unread state is changed.
if event['flag'] == 'read' and event['operation'] == 'add':
for remove_id in event['messages']:
remove_message_id_from_unread_mgs(state, remove_id)
if event['flag'] == 'starred' and event['operation'] == 'add':
state['starred_messages'] += event['messages']
if event['flag'] == 'starred' and event['operation'] == 'remove':
state['starred_messages'] = [message for message in state['starred_messages']
if not (message in event['messages'])]
elif event['type'] == "realm_domains":
if event['op'] == 'add':
state['realm_domains'].append(event['realm_domain'])
elif event['op'] == 'change':
for realm_domain in state['realm_domains']:
if realm_domain['domain'] == event['realm_domain']['domain']:
realm_domain['allow_subdomains'] = event['realm_domain']['allow_subdomains']
elif event['op'] == 'remove':
state['realm_domains'] = [realm_domain for realm_domain in state['realm_domains']
if realm_domain['domain'] != event['domain']]
elif event['type'] == "realm_emoji":
state['realm_emoji'] = event['realm_emoji']
elif event['type'] == "alert_words":
state['alert_words'] = event['alert_words']
elif event['type'] == "muted_topics":
state['muted_topics'] = event["muted_topics"]
elif event['type'] == "realm_filters":
state['realm_filters'] = event["realm_filters"]
elif event['type'] == "update_display_settings":
assert event['setting_name'] in UserProfile.property_types
state[event['setting_name']] = event['setting']
elif event['type'] == "update_global_notifications":
assert event['notification_name'] in UserProfile.notification_setting_types
state[event['notification_name']] = event['setting']
elif event['type'] == "invites_changed":
pass
elif event['type'] == "user_group":
if event['op'] == 'add':
state['realm_user_groups'].append(event['group'])
state['realm_user_groups'].sort(key=lambda group: group['id'])
elif event['op'] == 'update':
for user_group in state['realm_user_groups']:
if user_group['id'] == event['group_id']:
user_group.update(event['data'])
elif event['op'] == 'add_members':
for user_group in state['realm_user_groups']:
if user_group['id'] == event['group_id']:
user_group['members'].extend(event['user_ids'])
user_group['members'].sort()
elif event['op'] == 'remove_members':
for user_group in state['realm_user_groups']:
if user_group['id'] == event['group_id']:
members = set(user_group['members'])
user_group['members'] = list(members - set(event['user_ids']))
user_group['members'].sort()
elif event['op'] == 'remove':
state['realm_user_groups'] = [ug for ug in state['realm_user_groups']
if ug['id'] != event['group_id']]
else:
raise AssertionError("Unexpected event type %s" % (event['type'],))
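# Illustrative aside (not part of the original module): the events consumed by
# the chain above are plain dicts whose fields vary by type; the shapes below
# are inferred from the handlers here and are not an exhaustive schema, e.g.
#     {'type': 'realm_emoji', 'realm_emoji': {...}}
#     {'type': 'update_message_flags', 'flag': 'read', 'operation': 'add',
#      'messages': [101, 102]}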
def do_events_register(user_profile: UserProfile, user_client: Client,
apply_markdown: bool = True,
client_gravatar: bool = False,
event_types: Optional[Iterable[str]] = None,
queue_lifespan_secs: int = 0,
all_public_streams: bool = False,
include_subscribers: bool = True,
narrow: Iterable[Sequence[str]] = [],
fetch_event_types: Optional[Iterable[str]] = None) -> Dict[str, Any]:
# Technically we don't need to check this here because
# build_narrow_filter will check it, but it's nicer from an error
# handling perspective to do it before contacting Tornado
check_supported_events_narrow_filter(narrow)
# Note that we pass event_types, not fetch_event_types here, since
# that's what controls which future events are sent.
queue_id = request_event_queue(user_profile, user_client, apply_markdown, client_gravatar,
queue_lifespan_secs, event_types, all_public_streams,
narrow=narrow)
if queue_id is None:
raise JsonableError(_("Could not allocate event queue"))
if fetch_event_types is not None:
event_types_set = set(fetch_event_types) # type: Optional[Set[str]]
elif event_types is not None:
event_types_set = set(event_types)
else:
event_types_set = None
# Fill up the UserMessage rows if a soft-deactivated user has returned
maybe_catch_up_soft_deactivated_user(user_profile)
ret = fetch_initial_state_data(user_profile, event_types_set, queue_id,
client_gravatar=client_gravatar,
include_subscribers=include_subscribers)
# Apply events that came in while we were fetching initial data
events = get_user_events(user_profile, queue_id, -1)
apply_events(ret, events, user_profile, include_subscribers=include_subscribers,
client_gravatar=client_gravatar,
fetch_event_types=fetch_event_types)
post_process_state(ret)
if len(events) > 0:
ret['last_event_id'] = events[-1]['id']
else:
ret['last_event_id'] = -1
return ret
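# Sketch of a typical caller (an assumption for illustration; the real entry
# point lives in the events view code, not in this module). `fetch_event_types`
# widens what is fetched initially, while `event_types` controls which future
# events the queue will receive.
#     ret = do_events_register(user_profile, user_client,
#                              event_types=['message', 'realm_user'],
#                              fetch_event_types=['message', 'realm_user',
#                                                 'realm_emoji'])
#     last_event_id = ret['last_event_id']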
def post_process_state(ret: Dict[str, Any]) -> None:
'''
NOTE:
Below is an example of post-processing initial state data AFTER we
apply events. For large payloads like `unread_msgs`, it's helpful
to have an intermediate data structure that is easy to manipulate
with O(1)-type operations as we apply events.
    Then, only at the end, we put it in the form that's more appropriate
    for the client.
'''
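    # For example (illustrative only): 'raw_unread_msgs' keeps per-message dicts
    # keyed by message id so the event handlers above can update them in O(1),
    # and aggregate_unread_data() then folds them into the per-stream/per-topic
    # summary that clients actually render.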
if 'raw_unread_msgs' in ret:
ret['unread_msgs'] = aggregate_unread_data(ret['raw_unread_msgs'])
del ret['raw_unread_msgs']
'''
See the note above; the same technique applies below.
'''
    if 'raw_users' in ret:
user_dicts = list(ret['raw_users'].values())
ret['realm_users'] = [d for d in user_dicts if d['is_active']]
ret['realm_non_active_users'] = [d for d in user_dicts if not d['is_active']]
'''
Be aware that we do intentional aliasing in the below code.
We can now safely remove the `is_active` field from all the
dicts that got partitioned into the two lists above.
We remove the field because it's already implied, and sending
it to clients makes clients prone to bugs where they "trust"
the field but don't actually update in live updates. It also
wastes bandwidth.
'''
for d in user_dicts:
d.pop('is_active')
del ret['raw_users']
| [
"[email protected]"
] | |
6fc0c48ca6a5e0811c61ffb490b39d781ac8d4f5 | eb7c49c58cab51248b249a9985cb58cfefc1cd90 | /distillation/distillation.py | 1a1c4732cc7e8e884caedf26d93070b2aa130ba0 | [] | no_license | kzky/reproductions | 5b6c43c12ec085586d812edfa8d79ae76750e397 | 28fdb33048ea1f7f1dbd8c94612513d4714a6c95 | refs/heads/master | 2021-01-18T01:59:51.435515 | 2017-09-10T11:58:21 | 2017-09-10T11:58:21 | 68,796,105 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,196 | py | from __future__ import absolute_import
from six.moves import range
import os
import nnabla as nn
import nnabla.logger as logger
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla.utils.save as save
from args import get_args
from mnist_data import data_iterator_mnist
from models import mnist_resnet_prediction, categorical_error, kl_divergence
def distil():
args = get_args()
# Get context.
from nnabla.contrib.context import extension_context
extension_module = args.context
if args.context is None:
extension_module = 'cpu'
logger.info("Running in %s" % extension_module)
ctx = extension_context(extension_module, device_id=args.device_id)
nn.set_default_context(ctx)
# Create CNN network for both training and testing.
mnist_cnn_prediction = mnist_resnet_prediction
# TRAIN
teacher = "teacher"
student = "student"
# Create input variables.
image = nn.Variable([args.batch_size, 1, 28, 28])
    image.persistent = True  # do not clear the intermediate buffer; it is re-used
label = nn.Variable([args.batch_size, 1])
    label.persistent = True  # do not clear the intermediate buffer; it is re-used
    # Create the "teacher" and "student" prediction graphs.
model_load_path = args.model_load_path
nn.load_parameters(model_load_path)
pred_label = mnist_cnn_prediction(image, net=teacher, maps=64, test=False)
    pred_label.need_grad = False  # no need to backpropagate through the teacher graph
pred = mnist_cnn_prediction(image, net=student, maps=32, test=False)
    pred.persistent = True  # do not clear the intermediate buffer; it is re-used
loss_ce = F.mean(F.softmax_cross_entropy(pred, label))
loss_kl = kl_divergence(pred, pred_label)
loss = args.weight_ce * loss_ce + args.weight_kl * loss_kl
# TEST
# Create input variables.
vimage = nn.Variable([args.batch_size, 1, 28, 28])
vlabel = nn.Variable([args.batch_size, 1])
    # Create the student prediction graph in test mode for validation.
vpred = mnist_cnn_prediction(vimage, net=student, maps=32, test=True)
# Create Solver.
solver = S.Adam(args.learning_rate)
with nn.parameter_scope(student):
solver.set_parameters(nn.get_parameters())
# Create monitor.
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = MonitorSeries("Test error", monitor, interval=10)
# Initialize DataIterator for MNIST.
data = data_iterator_mnist(args.batch_size, True)
vdata = data_iterator_mnist(args.batch_size, False)
best_ve = 1.0
# Training loop.
for i in range(args.max_iter):
if i % args.val_interval == 0:
# Validation
ve = 0.0
for j in range(args.val_iter):
vimage.d, vlabel.d = vdata.next()
vpred.forward(clear_buffer=True)
ve += categorical_error(vpred.d, vlabel.d)
monitor_verr.add(i, ve / args.val_iter)
if ve < best_ve:
nn.save_parameters(os.path.join(
args.model_save_path, 'params_%06d.h5' % i))
best_ve = ve
# Training forward
image.d, label.d = data.next()
solver.zero_grad()
loss.forward(clear_no_need_grad=True)
loss.backward(clear_buffer=True)
solver.weight_decay(args.weight_decay)
solver.update()
e = categorical_error(pred.d, label.d)
monitor_loss.add(i, loss.d.copy())
monitor_err.add(i, e)
monitor_time.add(i)
ve = 0.0
for j in range(args.val_iter):
vimage.d, vlabel.d = vdata.next()
vpred.forward(clear_buffer=True)
ve += categorical_error(vpred.d, vlabel.d)
monitor_verr.add(i, ve / args.val_iter)
parameter_file = os.path.join(
args.model_save_path, 'params_{:06}.h5'.format(args.max_iter))
nn.save_parameters(parameter_file)
if __name__ == '__main__':
distil()
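# Sketch only (an assumption, not the repository's actual code): kl_divergence()
# is imported from models.py, which is not shown in this file. A common
# formulation for a distillation loss is the KL divergence between the teacher
# and student softmax distributions, roughly:
#     def kl_divergence(student_logits, teacher_logits):
#         p = F.softmax(teacher_logits)
#         q = F.softmax(student_logits)
#         return F.mean(F.sum(p * (F.log(p) - F.log(q)), axis=1))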
| [
"[email protected]"
] | |
907069c4e874872e63285e89411fa6c7850e9249 | 49f1dabc455e78716f35240eb0d88bb8d3d51798 | /tests/unit/modules/storage/netapp/test_netapp_e_volume.py | fa2503f9326a03bb6a204d427bd715b6c4bbf074 | [] | no_license | ansible-collection-migration/netapp.netapp | b4bad4b22081a945e335c29193b7758192715a8d | ce88524a7d260b8a941aaab532a938b514600a26 | refs/heads/master | 2020-12-18T13:02:18.854497 | 2020-01-22T13:10:07 | 2020-01-22T13:10:07 | 235,393,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59,414 | py | # coding=utf-8
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
try:
from unittest import mock
except ImportError:
import mock
from ansible_collections.netapp.netapp.plugins.module_utils.netapp import NetAppESeriesModule
from ansible_collections.netapp.netapp.plugins.modules.netapp_e_volume import NetAppESeriesVolume
from ansible_collections.netapp.netapp.tests.unit.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
class NetAppESeriesVolumeTest(ModuleTestCase):
REQUIRED_PARAMS = {"api_username": "username",
"api_password": "password",
"api_url": "http://localhost/devmgr/v2",
"ssid": "1",
"validate_certs": "no"}
THIN_VOLUME_RESPONSE = [{"capacity": "1288490188800",
"volumeRef": "3A000000600A098000A4B28D000010475C405428",
"status": "optimal",
"protectionType": "type1Protection",
"maxVirtualCapacity": "281474976710656",
"initialProvisionedCapacity": "4294967296",
"currentProvisionedCapacity": "4294967296",
"provisionedCapacityQuota": "1305670057984",
"growthAlertThreshold": 85,
"expansionPolicy": "automatic",
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000001000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "volume"}],
"dataAssurance": True,
"segmentSize": 131072,
"diskPool": True,
"listOfMappings": [],
"mapped": False,
"currentControllerId": "070000000000000000000001",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 0},
"name": "thin_volume",
"id": "3A000000600A098000A4B28D000010475C405428"}]
VOLUME_GET_RESPONSE = [{"offline": False,
"raidLevel": "raid6",
"capacity": "214748364800",
"reconPriority": 1,
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B9D100000F095C2F7F31",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Clare"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000002",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "214748364800",
"name": "Matthew",
"id": "02000000600A098000A4B9D100000F095C2F7F31"},
{"offline": False,
"raidLevel": "raid6",
"capacity": "107374182400",
"reconPriority": 1,
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B28D00000FBE5C2F7F26",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Samantha"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000001",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "107374182400",
"name": "Samantha",
"id": "02000000600A098000A4B28D00000FBE5C2F7F26"},
{"offline": False,
"raidLevel": "raid6",
"capacity": "107374182400",
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B9D100000F0B5C2F7F40",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"volumeGroupRef": "04000000600A098000A4B9D100000F085C2F7F26",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Micah"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000002",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "107374182400",
"name": "Micah",
"id": "02000000600A098000A4B9D100000F0B5C2F7F40"}]
STORAGE_POOL_GET_RESPONSE = [{"offline": False,
"raidLevel": "raidDiskPool",
"volumeGroupRef": "04000000600A",
"securityType": "capable",
"protectionInformationCapable": False,
"protectionInformationCapabilities": {"protectionInformationCapable": True,
"protectionType": "type2Protection"},
"volumeGroupData": {"type": "diskPool",
"diskPoolData": {"reconstructionReservedDriveCount": 1,
"reconstructionReservedAmt": "296889614336",
"reconstructionReservedDriveCountCurrent": 1,
"poolUtilizationWarningThreshold": 0,
"poolUtilizationCriticalThreshold": 85,
"poolUtilizationState": "utilizationOptimal",
"unusableCapacity": "0",
"degradedReconstructPriority": "high",
"criticalReconstructPriority": "highest",
"backgroundOperationPriority": "low",
"allocGranularity": "4294967296"}},
"reservedSpaceAllocated": False,
"securityLevel": "fde",
"usedSpace": "863288426496",
"totalRaidedSpace": "2276332666880",
"raidStatus": "optimal",
"freeSpace": "1413044240384",
"drivePhysicalType": "sas",
"driveMediaType": "hdd",
"diskPool": True,
"id": "04000000600A098000A4B9D100000F085C2F7F26",
"name": "employee_data_storage_pool"},
{"offline": False,
"raidLevel": "raid1",
"volumeGroupRef": "04000000600A098000A4B28D00000FBD5C2F7F19",
"state": "complete",
"securityType": "capable",
"drawerLossProtection": False,
"protectionInformationCapable": False,
"protectionInformationCapabilities": {"protectionInformationCapable": True,
"protectionType": "type2Protection"},
"volumeGroupData": {"type": "unknown", "diskPoolData": None},
"reservedSpaceAllocated": False,
"securityLevel": "fde",
"usedSpace": "322122547200",
"totalRaidedSpace": "598926258176",
"raidStatus": "optimal",
"freeSpace": "276803710976",
"drivePhysicalType": "sas",
"driveMediaType": "hdd",
"diskPool": False,
"id": "04000000600A098000A4B28D00000FBD5C2F7F19",
"name": "database_storage_pool"}]
GET_LONG_LIVED_OPERATION_RESPONSE = [
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]},
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]},
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]},
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]}]
WORKLOAD_GET_RESPONSE = [{"id": "4200000001000000000000000000000000000000", "name": "general_workload_1",
"workloadAttributes": [{"key": "profileId", "value": "Other_1"}]},
{"id": "4200000002000000000000000000000000000000", "name": "employee_data",
"workloadAttributes": [{"key": "use", "value": "EmployeeData"},
{"key": "location", "value": "ICT"},
{"key": "private", "value": "public"},
{"key": "profileId", "value": "ansible_workload_1"}]},
{"id": "4200000003000000000000000000000000000000", "name": "customer_database",
"workloadAttributes": [{"key": "use", "value": "customer_information"},
{"key": "location", "value": "global"},
{"key": "profileId", "value": "ansible_workload_2"}]},
{"id": "4200000004000000000000000000000000000000", "name": "product_database",
"workloadAttributes": [{"key": "use", "value": "production_information"},
{"key": "security", "value": "private"},
{"key": "location", "value": "global"},
{"key": "profileId", "value": "ansible_workload_4"}]}]
REQUEST_FUNC = 'ansible_collections.netapp.netapp.plugins.modules.netapp_e_volume.NetAppESeriesVolume.request'
GET_VOLUME_FUNC = 'ansible_collections.netapp.netapp.plugins.modules.netapp_e_volume.NetAppESeriesVolume.get_volume'
SLEEP_FUNC = 'ansible_collections.netapp.netapp.plugins.modules.netapp_e_volume.sleep'
def _set_args(self, args=None):
module_args = self.REQUIRED_PARAMS.copy()
if args is not None:
module_args.update(args)
set_module_args(module_args)
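    # Illustrative note (not part of the original tests): every case below
    # patches REQUEST_FUNC so that no live storage array is contacted; the
    # mocked (status_code, body) tuple is the shape these tests rely on, e.g.
    #     with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
    #         ...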
def test_module_arguments_pass(self):
"""Ensure valid arguments successful create a class instance."""
arg_sets = [{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 10},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1024,
"thin_volume_growth_alert_threshold": 99},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "kb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 67108864}]
# validate size normalization
for arg_set in arg_sets:
self._set_args(arg_set)
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.size_b, volume_object.convert_to_aligned_bytes(arg_set["size"]))
self.assertEqual(volume_object.thin_volume_repo_size_b, volume_object.convert_to_aligned_bytes(arg_set["thin_volume_repo_size"]))
self.assertEqual(volume_object.thin_volume_expansion_policy, "automatic")
if "thin_volume_max_repo_size" not in arg_set.keys():
self.assertEqual(volume_object.thin_volume_max_repo_size_b, volume_object.convert_to_aligned_bytes(arg_set["size"]))
else:
self.assertEqual(volume_object.thin_volume_max_repo_size_b,
volume_object.convert_to_aligned_bytes(arg_set["thin_volume_max_repo_size"]))
# validate metadata form
self._set_args(
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10, "workload_name": "workload1",
"metadata": {"availability": "public", "security": "low"}})
volume_object = NetAppESeriesVolume()
for entry in volume_object.metadata:
self.assertTrue(entry in [{'value': 'low', 'key': 'security'}, {'value': 'public', 'key': 'availability'}])
def test_module_arguments_fail(self):
"""Ensure invalid arguments values do not create a class instance."""
arg_sets = [{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 260},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 10},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 9},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 100}]
for arg_set in arg_sets:
with self.assertRaises(AnsibleFailJson):
self._set_args(arg_set)
print(arg_set)
volume_object = NetAppESeriesVolume()
def test_get_volume_pass(self):
"""Evaluate the get_volume method."""
with mock.patch(self.REQUEST_FUNC,
side_effect=[(200, self.VOLUME_GET_RESPONSE), (200, self.THIN_VOLUME_RESPONSE)]):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_volume(),
[entry for entry in self.VOLUME_GET_RESPONSE if entry["name"] == "Matthew"][0])
with mock.patch(self.REQUEST_FUNC,
side_effect=[(200, self.VOLUME_GET_RESPONSE), (200, self.THIN_VOLUME_RESPONSE)]):
self._set_args({"state": "present", "name": "NotAVolume", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_volume(), {})
def test_get_volume_fail(self):
"""Evaluate the get_volume exception paths."""
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of thick volumes."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.get_volume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of thin volumes."):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.VOLUME_GET_RESPONSE), Exception()]):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.get_volume()
def tests_wait_for_volume_availability_pass(self):
"""Ensure wait_for_volume_availability completes as expected."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
with mock.patch(self.SLEEP_FUNC, return_value=None):
with mock.patch(self.GET_VOLUME_FUNC, side_effect=[False, False, True]):
volume_object.wait_for_volume_availability()
def tests_wait_for_volume_availability_fail(self):
"""Ensure wait_for_volume_availability throws the expected exceptions."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.get_volume = lambda: False
with self.assertRaisesRegexp(AnsibleFailJson, "Timed out waiting for the volume"):
with mock.patch(self.SLEEP_FUNC, return_value=None):
volume_object.wait_for_volume_availability()
def tests_wait_for_volume_action_pass(self):
"""Ensure wait_for_volume_action completes as expected."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315D494C6F",
"storageVolumeRef": "02000000600A098000A4B9D1000037315DXXXXXX"}
with mock.patch(self.SLEEP_FUNC, return_value=None):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[1]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[2]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[3])]):
volume_object.wait_for_volume_action()
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315DXXXXXX",
"storageVolumeRef": "02000000600A098000A4B9D1000037315D494C6F"}
with mock.patch(self.SLEEP_FUNC, return_value=None):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[1]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[2]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[3])]):
volume_object.wait_for_volume_action()
def tests_wait_for_volume_action_fail(self):
"""Ensure wait_for_volume_action throws the expected exceptions."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315DXXXXXX",
"storageVolumeRef": "02000000600A098000A4B9D1000037315D494C6F"}
with mock.patch(self.SLEEP_FUNC, return_value=None):
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to get volume expansion progress."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.wait_for_volume_action()
with self.assertRaisesRegexp(AnsibleFailJson, "Expansion action failed to complete."):
with mock.patch(self.REQUEST_FUNC, return_value=(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0])):
volume_object.wait_for_volume_action(timeout=300)
def test_get_storage_pool_pass(self):
"""Evaluate the get_storage_pool method."""
with mock.patch(self.REQUEST_FUNC, return_value=(200, self.STORAGE_POOL_GET_RESPONSE)):
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool",
"size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_storage_pool(), [entry for entry in self.STORAGE_POOL_GET_RESPONSE if
entry["name"] == "employee_data_storage_pool"][0])
self._set_args(
{"state": "present", "name": "NewVolume", "storage_pool_name": "NotAStoragePool", "size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_storage_pool(), {})
def test_get_storage_pool_fail(self):
"""Evaluate the get_storage_pool exception paths."""
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of storage pools."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.get_storage_pool()
def test_check_storage_pool_sufficiency_pass(self):
"""Ensure passing logic."""
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = [entry for entry in self.STORAGE_POOL_GET_RESPONSE
if entry["name"] == "employee_data_storage_pool"][0]
volume_object.check_storage_pool_sufficiency()
def test_check_storage_pool_sufficiency_fail(self):
"""Validate exceptions are thrown for insufficient storage pool resources."""
self._set_args({"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 10})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Requested storage pool"):
volume_object.check_storage_pool_sufficiency()
with self.assertRaisesRegexp(AnsibleFailJson,
"Thin provisioned volumes can only be created on raid disk pools."):
volume_object.pool_detail = [entry for entry in self.STORAGE_POOL_GET_RESPONSE
if entry["name"] == "database_storage_pool"][0]
volume_object.volume_detail = {}
volume_object.check_storage_pool_sufficiency()
with self.assertRaisesRegexp(AnsibleFailJson, "requires the storage pool to be DA-compatible."):
volume_object.pool_detail = {"diskPool": True,
"protectionInformationCapabilities": {"protectionType": "type0Protection",
"protectionInformationCapable": False}}
volume_object.volume_detail = {}
volume_object.data_assurance_enabled = True
volume_object.check_storage_pool_sufficiency()
volume_object.pool_detail = {"diskPool": True,
"protectionInformationCapabilities": {"protectionType": "type2Protection",
"protectionInformationCapable": True}}
volume_object.check_storage_pool_sufficiency()
self._set_args({"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": False})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson,
"Not enough storage pool free space available for the volume's needs."):
volume_object.pool_detail = {"freeSpace": 10, "diskPool": True,
"protectionInformationCapabilities": {"protectionType": "type2Protection",
"protectionInformationCapable": True}}
volume_object.volume_detail = {"totalSizeInBytes": 100}
volume_object.data_assurance_enabled = True
volume_object.size_b = 1
volume_object.check_storage_pool_sufficiency()
def test_update_workload_tags_pass(self):
"""Validate updating workload tags."""
test_sets = [[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100}, False],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data"}, False],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "customer_database",
"metadata": {"use": "customer_information", "location": "global"}}, False],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "customer_database",
"metadata": {"use": "customer_information"}}, True],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "customer_database",
"metadata": {"use": "customer_information", "location": "local"}}, True],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "customer_database",
"metadata": {"use": "customer_information", "location": "global", "importance": "no"}}, True],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "newWorkload",
"metadata": {"for_testing": "yes"}}, True],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "newWorkload"}, True]]
for test in test_sets:
self._set_args(test[0])
volume_object = NetAppESeriesVolume()
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), (200, {"id": 1})]):
self.assertEqual(volume_object.update_workload_tags(), test[1])
def test_update_workload_tags_fail(self):
"""Validate updating workload tags fails appropriately."""
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data"})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve storage array workload tags."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.update_workload_tags()
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data", "metadata": {"key": "not-use", "value": "EmployeeData"}})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create new workload tag."):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), Exception()]):
volume_object.update_workload_tags()
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data2", "metadata": {"key": "use", "value": "EmployeeData"}})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create new workload tag."):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), Exception()]):
volume_object.update_workload_tags()
def test_get_volume_property_changes_pass(self):
"""Verify correct dictionary is returned"""
# no property changes
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1}, "flashCached": True,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(), dict())
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": True, "thin_volume_repo_size": 64,
"thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1},
"flashCached": True, "growthAlertThreshold": "90",
"expansionPolicy": "automatic", "segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(), dict())
# property changes
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": False, "readCacheEnable": False, "writeCacheEnable": True,
"readAheadMultiplier": 1}, "flashCached": True,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(),
{"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
'flashCache': True})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True, "cache_without_batteries": False,
"read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 1}, "flashCached": True,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(),
{"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
'flashCache': True})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True, "cache_without_batteries": True,
"read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1}, "flashCached": False,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(),
{"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True, "cacheWithoutBatteries": True},
'flashCache': True})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True, "cache_without_batteries": True,
"read_ahead_enable": False, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1}, "flashCached": False,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(), {"metaTags": [],
'cacheSettings': {'readCacheEnable': True,
'writeCacheEnable': True,
'readAheadEnable': False,
"cacheWithoutBatteries": True},
'flashCache': True})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": True, "thin_volume_repo_size": 64,
"thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": True, "readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1},
"flashCached": True, "growthAlertThreshold": "95",
"expansionPolicy": "automatic", "segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(),
{"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
'growthAlertThreshold': 90, 'flashCache': True})
def test_get_volume_property_changes_fail(self):
"""Verify correct exception is thrown"""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True, "read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {
"cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True, "readAheadMultiplier": 1},
"flashCached": True, "segmentSize": str(512 * 1024)}
with self.assertRaisesRegexp(AnsibleFailJson, "Existing volume segment size is"):
volume_object.get_volume_property_changes()
def test_get_expand_volume_changes_pass(self):
"""Verify expansion changes."""
# thick volumes
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(50 * 1024 * 1024 * 1024), "thinProvisioned": False}
self.assertEqual(volume_object.get_expand_volume_changes(),
{"sizeUnit": "bytes", "expansionSize": 100 * 1024 * 1024 * 1024})
# thin volumes
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "automatic", "thin_volume_repo_size": 64,
"thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(50 * 1024 * 1024 * 1024), "thinProvisioned": True,
"expansionPolicy": "automatic",
"provisionedCapacityQuota": str(1000 * 1024 * 1024 * 1024)}
self.assertEqual(volume_object.get_expand_volume_changes(),
{"sizeUnit": "bytes", "newVirtualSize": 100 * 1024 * 1024 * 1024})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "automatic", "thin_volume_repo_size": 64,
"thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
"expansionPolicy": "automatic",
"provisionedCapacityQuota": str(500 * 1024 * 1024 * 1024)}
self.assertEqual(volume_object.get_expand_volume_changes(),
{"sizeUnit": "bytes", "newRepositorySize": 1000 * 1024 * 1024 * 1024})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 504, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
"expansionPolicy": "manual",
"currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
self.assertEqual(volume_object.get_expand_volume_changes(),
{"sizeUnit": "bytes", "newRepositorySize": 504 * 1024 * 1024 * 1024})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 756, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
"expansionPolicy": "manual",
"currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
self.assertEqual(volume_object.get_expand_volume_changes(),
{"sizeUnit": "bytes", "newRepositorySize": 756 * 1024 * 1024 * 1024})
def test_get_expand_volume_changes_fail(self):
"""Verify exceptions are thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(1000 * 1024 * 1024 * 1024)}
with self.assertRaisesRegexp(AnsibleFailJson, "Reducing the size of volumes is not permitted."):
volume_object.get_expand_volume_changes()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 502, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
"expansionPolicy": "manual",
"currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
with self.assertRaisesRegexp(AnsibleFailJson, "The thin volume repository increase must be between or equal"):
volume_object.get_expand_volume_changes()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"capacity": str(100 * 1024 * 1024 * 1024), "thinProvisioned": True,
"expansionPolicy": "manual",
"currentProvisionedCapacity": str(500 * 1024 * 1024 * 1024)}
with self.assertRaisesRegexp(AnsibleFailJson, "The thin volume repository increase must be between or equal"):
volume_object.get_expand_volume_changes()
def test_create_volume_pass(self):
"""Verify volume creation."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.create_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.create_volume()
def test_create_volume_fail(self):
"""Verify exceptions thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.create_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create thin volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.create_volume()
def test_update_volume_properties_pass(self):
"""verify property update."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
volume_object.wait_for_volume_availability = lambda: None
volume_object.get_volume = lambda: {"id": "12345'"}
volume_object.get_volume_property_changes = lambda: {
'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
'flashCached': True}
volume_object.workload_id = "4200000001000000000000000000000000000000"
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
self.assertTrue(volume_object.update_volume_properties())
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
volume_object.wait_for_volume_availability = lambda: None
volume_object.get_volume = lambda: {"id": "12345'"}
volume_object.get_volume_property_changes = lambda: {
'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
'flashCached': True}
volume_object.workload_id = "4200000001000000000000000000000000000000"
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
self.assertTrue(volume_object.update_volume_properties())
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"metadata": [{"key": "workloadId", "value": "12345"}]}
volume_object.wait_for_volume_availability = lambda: None
volume_object.get_volume = lambda: {"id": "12345'"}
volume_object.get_volume_property_changes = lambda: {}
volume_object.workload_id = "4200000001000000000000000000000000000000"
self.assertFalse(volume_object.update_volume_properties())
def test_update_volume_properties_fail(self):
"""Verify exceptions are thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
volume_object.wait_for_volume_availability = lambda: None
volume_object.get_volume = lambda: {"id": "12345'"}
volume_object.get_volume_property_changes = lambda: {
'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
'flashCached': True}
volume_object.workload_id = "4200000001000000000000000000000000000000"
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update volume properties."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self.assertTrue(volume_object.update_volume_properties())
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = {"id": "12345"}
volume_object.wait_for_volume_availability = lambda: None
volume_object.get_volume = lambda: {"id": "12345'"}
volume_object.get_volume_property_changes = lambda: {
'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True}, 'growthAlertThreshold': 90,
'flashCached': True}
volume_object.workload_id = "4200000001000000000000000000000000000000"
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update thin volume properties."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self.assertTrue(volume_object.update_volume_properties())
def test_expand_volume_pass(self):
"""Verify volume expansion."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
"expansionSize": 100 * 1024 * 1024 * 1024}
volume_object.volume_detail = {"id": "12345", "thinProvisioned": True}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.expand_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
"expansionSize": 100 * 1024 * 1024 * 1024}
volume_object.volume_detail = {"id": "12345", "thinProvisioned": True}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.expand_volume()
def test_expand_volume_fail(self):
"""Verify exceptions are thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
"expansionSize": 100 * 1024 * 1024 * 1024}
volume_object.volume_detail = {"id": "12345", "thinProvisioned": False}
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to expand volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.expand_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True})
volume_object = NetAppESeriesVolume()
volume_object.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
"expansionSize": 100 * 1024 * 1024 * 1024}
volume_object.volume_detail = {"id": "12345", "thinProvisioned": True}
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to expand thin volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.expand_volume()
def test_delete_volume_pass(self):
"""Verify volume deletion."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "12345"}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.delete_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True,
"thin_volume_expansion_policy": "manual", "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "12345"}
with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
volume_object.delete_volume()
def test_delete_volume_fail(self):
"""Verify exceptions are thrown."""
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": False})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to delete volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.delete_volume()
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "thin_provision": True})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to delete thin volume."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.delete_volume()
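# Assumed invocation (not part of the module): these unit tests are normally
# collected and run with pytest from the collection root, e.g.
#     pytest tests/unit/modules/storage/netapp/test_netapp_e_volume.py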
| [
"[email protected]"
] | |
3d3247e759523d5a191a42471586868ee3cb0831 | 69f4d47bef0e3b4150891127717d6f4d52193b47 | /sinaSpider/sinaSpider/spiders/sina.py | 356db6054c3f6ed71e0e9e53f2492f805c8423ad | [] | no_license | hyhlinux/stu_scrapy | 27e45ad10eb9b85aada5ced34d29f00923555bf2 | 36afdb88f84a82453bf03070ed049599386a83c0 | refs/heads/master | 2020-06-12T11:55:19.986569 | 2016-12-10T03:18:37 | 2016-12-10T03:18:37 | 75,581,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,378 | py | # -*- coding: utf-8 -*-
import scrapy
import sys
import os
from sinaSpider.items import SinaspiderItem as SinaItem
reload(sys)
sys.setdefaultencoding("utf-8")
class SinaSpider(scrapy.Spider):
name = "sina"
allowed_domains = ["sina.com.cn"]
start_urls = ["http://news.sina.com.cn/guide/"]
def parse(self, response):
# print response.body
#test ok
        items = []
        # URLs and titles of all top-level categories
        parentUrls = response.xpath('//div[@id="tab01"]/div/h3/a/@href').extract()
        parentTitle = response.xpath('//div[@id="tab01"]/div/h3/a/text()').extract()
        # URLs and titles of all sub-categories
        subUrls = response.xpath('//div[@id="tab01"]/div/ul/li/a/@href').extract()
        subTitle = response.xpath('//div[@id="tab01"]/div/ul/li/a/text()').extract()
        # Crawl every top-level category
for i in range(0, len(parentTitle)):
            # Directory path and name for this top-level category
            parentFilename = "./Data/" + parentTitle[i]
            # Create the directory if it does not exist yet
            if not os.path.exists(parentFilename):
                os.makedirs(parentFilename)
            # Walk all sub-categories
for j in range(0, len(subUrls)):
item = SinaItem()
                # Keep the parent category's title and URL
                item['parentTitle'] = parentTitle[i]
                item['parentUrls'] = parentUrls[i]
                # Check whether the sub-category URL starts with its parent URL,
                # e.g. sports.sina.com.cn and sports.sina.com.cn/nba
                if_belong = subUrls[j].startswith(item['parentUrls'])
                # If it belongs to this parent, nest its directory under the parent's
                if if_belong:
                    subFilename = parentFilename + '/' + subTitle[j]
                    # Create the directory if it does not exist yet
                    if not os.path.exists(subFilename):
                        os.makedirs(subFilename)
                    # Store the sub-category url, title and filename fields
                    item['subUrls'] = subUrls[j]
                    item['subTitle'] = subTitle[j]
                    item['subFilename'] = subFilename
                    items.append(item)
        # Send a Request for every sub-category URL; the Response, together with
        # its meta data, is handed to the second_parse callback
        for item in items:
            yield scrapy.Request(url=item['subUrls'], meta={'meta_1': item}, callback=self.second_parse)
    # Recursively request the URLs found inside each sub-category page
    def second_parse(self, response):
        # Pull the meta data out of this Response
        meta_1 = response.meta['meta_1']
        # Collect every child link inside the sub-category page
        sonUrls = response.xpath('//a/@href').extract()
        items = []
        for i in range(0, len(sonUrls)):
            # Check whether the link starts with the parent URL and ends with .shtml
            if_belong = sonUrls[i].endswith('.shtml') and sonUrls[i].startswith(meta_1['parentUrls'])
            # If it belongs to this parent, gather all fields into one item for transport
            if if_belong:
item = SinaItem()
                item['parentTitle'] = meta_1['parentTitle']
                item['parentUrls'] = meta_1['parentUrls']
                item['subUrls'] = meta_1['subUrls']
                item['subTitle'] = meta_1['subTitle']
                item['subFilename'] = meta_1['subFilename']
                item['sonUrls'] = sonUrls[i]
                items.append(item)
        # Send a Request for every article URL; the Response, together with its
        # meta data, is handed to the detail_parse callback
        for item in items:
            yield scrapy.Request(url=item['sonUrls'], meta={'meta_2': item}, callback=self.detail_parse)
    # Parse the article page and extract its title and body text
    def detail_parse(self, response):
        item = response.meta['meta_2']
        content = ""
        # Extract the headline text and the article paragraphs
        head = response.xpath('//h1[@id="main_title"]/text()').extract()
        content_list = response.xpath('//div[@id="artibody"]/p/text()').extract()
        # Join the text of all <p> tags into one string
        for content_one in content_list:
            content += content_one
        item['head'] = head
        item['content'] = content
        yield item
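# Assumed usage (not part of the spider itself): from the Scrapy project root,
#     scrapy crawl sina
# starts the crawl. The spider only creates the ./Data/<parent>/<sub>/ folders;
# the project's item pipeline (not shown here) is expected to write each article
# using the filename fields carried on the item.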
| [
"[email protected]"
] | |
954fa3a0875b17570658a68818a3a968463ec410 | c1bb9a11278e28d074eb77fc179bf80ccd298f0d | /zinnia/feeds.py | 1bf8d3ee7457d59fb389acc7e84e53c01bfee050 | [] | no_license | ljarufe/mp100 | 394f6fbf76837b7fa6abf5de2901faa8c5561008 | a68d39a3e3b93c0b81f2893d61773b5a6453d108 | refs/heads/master | 2021-07-09T23:18:39.722196 | 2013-10-21T17:21:07 | 2013-10-21T17:21:07 | 13,749,228 | 0 | 1 | null | 2020-07-25T20:23:01 | 2013-10-21T17:14:12 | Python | UTF-8 | Python | false | false | 10,162 | py | """Feeds for Zinnia"""
from urlparse import urljoin
from BeautifulSoup import BeautifulSoup
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.feedgenerator import Atom1Feed
from django.utils.translation import ugettext as _
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import NoReverseMatch
from tagging.models import Tag
from tagging.models import TaggedItem
from zinnia.models import Entry
from zinnia.settings import COPYRIGHT
from zinnia.settings import PROTOCOL
from zinnia.settings import FEEDS_FORMAT
from zinnia.settings import FEEDS_MAX_ITEMS
from zinnia.managers import entries_published
from zinnia.views.categories import get_category_or_404
from zinnia.templatetags.zinnia_tags import get_gravatar
class ZinniaFeed(Feed):
"""Base Feed for Zinnia"""
feed_copyright = COPYRIGHT
def __init__(self):
self.site = Site.objects.get_current()
self.site_url = '%s://%s' % (PROTOCOL, self.site.domain)
if FEEDS_FORMAT == 'atom':
self.feed_type = Atom1Feed
self.subtitle = self.description
class EntryFeed(ZinniaFeed):
"""Base Entry Feed"""
title_template = 'feeds/entry_title.html'
description_template = 'feeds/entry_description.html'
def item_pubdate(self, item):
"""Publication date of an entry"""
return item.creation_date
def item_categories(self, item):
"""Entry's categories"""
return [category.title for category in item.categories.all()]
def item_author_name(self, item):
"""Returns the first author of an entry"""
if item.authors.count():
self.item_author = item.authors.all()[0]
return self.item_author.username
def item_author_email(self, item):
"""Returns the first author's email"""
return self.item_author.email
def item_author_link(self, item):
"""Returns the author's URL"""
try:
author_url = reverse('zinnia_author_detail',
args=[self.item_author.username])
return self.site_url + author_url
except NoReverseMatch:
return self.site_url
def item_enclosure_url(self, item):
"""Returns an image for enclosure"""
if item.image:
return item.image.url
img = BeautifulSoup(item.html_content).find('img')
if img:
return urljoin(self.site_url, img['src'])
def item_enclosure_length(self, item):
"""Hardcoded enclosure length"""
return '100000'
def item_enclosure_mime_type(self, item):
"""Hardcoded enclosure mimetype"""
return 'image/jpeg'
class LatestEntries(EntryFeed):
"""Feed for the latest entries"""
def link(self):
"""URL of latest entries"""
return reverse('zinnia_entry_archive_index')
def items(self):
"""Items are published entries"""
return Entry.published.all()[:FEEDS_MAX_ITEMS]
def title(self):
"""Title of the feed"""
return '%s - %s' % (self.site.name, _('Latest entries'))
def description(self):
"""Description of the feed"""
return _('The latest entries for the site %s') % self.site.name
class CategoryEntries(EntryFeed):
"""Feed filtered by a category"""
def get_object(self, request, path):
"""Retrieve the category by his path"""
return get_category_or_404(path)
def items(self, obj):
"""Items are the published entries of the category"""
return obj.entries_published_set()[:FEEDS_MAX_ITEMS]
def link(self, obj):
"""URL of the category"""
return obj.get_absolute_url()
def title(self, obj):
"""Title of the feed"""
return _('Entries for the category %s') % obj.title
def description(self, obj):
"""Description of the feed"""
return _('The latest entries for the category %s') % obj.title
class AuthorEntries(EntryFeed):
"""Feed filtered by an author"""
def get_object(self, request, username):
"""Retrieve the author by his username"""
return get_object_or_404(User, username=username)
def items(self, obj):
"""Items are the published entries of the author"""
return entries_published(obj.entry_set)[:FEEDS_MAX_ITEMS]
def link(self, obj):
"""URL of the author"""
return reverse('zinnia_author_detail', args=[obj.username])
def title(self, obj):
"""Title of the feed"""
return _('Entries for author %s') % obj.username
def description(self, obj):
"""Description of the feed"""
return _('The latest entries by %s') % obj.username
class TagEntries(EntryFeed):
"""Feed filtered by a tag"""
def get_object(self, request, slug):
"""Retrieve the tag by his name"""
return get_object_or_404(Tag, name=slug)
def items(self, obj):
"""Items are the published entries of the tag"""
return TaggedItem.objects.get_by_model(
Entry.published.all(), obj)[:FEEDS_MAX_ITEMS]
def link(self, obj):
"""URL of the tag"""
return reverse('zinnia_tag_detail', args=[obj.name])
def title(self, obj):
"""Title of the feed"""
return _('Entries for the tag %s') % obj.name
def description(self, obj):
"""Description of the feed"""
return _('The latest entries for the tag %s') % obj.name
class SearchEntries(EntryFeed):
"""Feed filtered by a search pattern"""
def get_object(self, request, slug):
"""The slug is the pattern to search"""
return slug
def items(self, obj):
"""Items are the published entries founds"""
return Entry.published.search(obj)[:FEEDS_MAX_ITEMS]
def link(self, obj):
"""URL of the search request"""
return '%s?pattern=%s' % (reverse('zinnia_entry_search'), obj)
def title(self, obj):
"""Title of the feed"""
return _("Results of the search for '%s'") % obj
def description(self, obj):
"""Description of the feed"""
return _("The entries containing the pattern '%s'") % obj
class EntryDiscussions(ZinniaFeed):
"""Feed for discussions in an entry"""
title_template = 'feeds/discussion_title.html'
description_template = 'feeds/discussion_description.html'
def get_object(self, request, slug):
"""Retrieve the discussions by entry's slug"""
return get_object_or_404(Entry, slug=slug)
def items(self, obj):
"""Items are the discussions on the entry"""
return obj.discussions[:FEEDS_MAX_ITEMS]
def item_pubdate(self, item):
"""Publication date of a discussion"""
return item.submit_date
def item_link(self, item):
"""URL of the discussion"""
return item.get_absolute_url()
def link(self, obj):
"""URL of the entry"""
return obj.get_absolute_url()
def item_author_name(self, item):
"""Author of the discussion"""
return item.userinfo['name']
def item_author_email(self, item):
"""Author's email of the discussion"""
return item.userinfo['email']
def item_author_link(self, item):
"""Author's URL of the discussion"""
return item.userinfo['url']
def title(self, obj):
"""Title of the feed"""
return _('Discussions on %s') % obj.title
def description(self, obj):
"""Description of the feed"""
return _('The latest discussions for the entry %s') % obj.title
class EntryComments(EntryDiscussions):
"""Feed for comments in an entry"""
title_template = 'feeds/comment_title.html'
description_template = 'feeds/comment_description.html'
def items(self, obj):
"""Items are the comments on the entry"""
return obj.comments[:FEEDS_MAX_ITEMS]
def item_link(self, item):
"""URL of the comment"""
return item.get_absolute_url('#comment_%(id)s')
def title(self, obj):
"""Title of the feed"""
return _('Comments on %s') % obj.title
def description(self, obj):
"""Description of the feed"""
return _('The latest comments for the entry %s') % obj.title
def item_enclosure_url(self, item):
"""Returns a gravatar image for enclosure"""
return get_gravatar(item.userinfo['email'])
def item_enclosure_length(self, item):
"""Hardcoded enclosure length"""
return '100000'
def item_enclosure_mime_type(self, item):
"""Hardcoded enclosure mimetype"""
return 'image/jpeg'
class EntryPingbacks(EntryDiscussions):
"""Feed for pingbacks in an entry"""
title_template = 'feeds/pingback_title.html'
description_template = 'feeds/pingback_description.html'
def items(self, obj):
"""Items are the pingbacks on the entry"""
return obj.pingbacks[:FEEDS_MAX_ITEMS]
def item_link(self, item):
"""URL of the pingback"""
return item.get_absolute_url('#pingback_%(id)s')
def title(self, obj):
"""Title of the feed"""
return _('Pingbacks on %s') % obj.title
def description(self, obj):
"""Description of the feed"""
return _('The latest pingbacks for the entry %s') % obj.title
class EntryTrackbacks(EntryDiscussions):
"""Feed for trackbacks in an entry"""
title_template = 'feeds/trackback_title.html'
description_template = 'feeds/trackback_description.html'
def items(self, obj):
"""Items are the trackbacks on the entry"""
return obj.trackbacks[:FEEDS_MAX_ITEMS]
def item_link(self, item):
"""URL of the trackback"""
return item.get_absolute_url('#trackback_%(id)s')
def title(self, obj):
"""Title of the feed"""
return _('Trackbacks on %s') % obj.title
def description(self, obj):
"""Description of the feed"""
return _('The latest trackbacks for the entry %s') % obj.title
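# Usage sketch (hypothetical URL wiring, shown only for illustration; Zinnia's own urls
# modules normally mount these feeds): a Django URLconf of this era could expose one as
#
#     from django.conf.urls import url
#     from zinnia.feeds import LatestEntries
#
#     urlpatterns = [url(r'^feeds/latest/$', LatestEntries(), name='latest_entries_feed')]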
| [
"[email protected]"
] | |
a80d8b96674191feb176a2faa0f67317a629e280 | e0ae697de14078f97287ad2b917af050805085bc | /swan/utils/plot.py | dc1a1ff5f1ac1d0bf94d888067d3fc84c323d007 | [
"Apache-2.0"
] | permissive | nlesc-nano/swan | 08204936e597017cff517c9a5b8f41262faba9c1 | 4edc9dc363ce901b1fcc19444bec42fc5930c4b9 | refs/heads/main | 2023-05-23T21:50:45.928108 | 2023-05-09T13:35:58 | 2023-05-09T13:35:58 | 191,957,101 | 15 | 1 | Apache-2.0 | 2023-05-09T13:36:29 | 2019-06-14T14:29:57 | Python | UTF-8 | Python | false | false | 2,809 | py | """Miscellaneous plot functions."""
from pathlib import Path
from typing import Any, Iterator, List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from ..modeller.gp_modeller import GPMultivariate
plt.switch_backend('agg')
def create_confidence_plot(
multi: GPMultivariate, expected: np.ndarray, prop: str,
output_name: str = "confidence_scatterplot") -> None:
"""Plot the results predicted multivariated results using confidence intervals."""
data = pd.DataFrame({
"expected": expected, "predicted": multi.mean, "confidence": (multi.upper - multi.lower) * 0.5,
"lower": multi.lower, "upper": multi.upper})
_, ax = plt.subplots(1, 1, figsize=(10, 10))
sns.lineplot(x="expected", y="expected", data=data, ax=ax)
sns.scatterplot(x="expected", y="predicted", data=data, ax=ax, size="confidence", hue="confidence", sizes=(10, 100))
path = Path(".") / f"{output_name}.png"
plt.savefig(path)
def create_scatter_plot(
predicted: np.ndarray, expected: np.ndarray, properties: List[str],
output_name: str = "scatterplot") -> None:
"""Plot the predicted vs the expected values."""
sns.set()
# Dataframes with the results
columns_predicted = [f"{p}_predicted" for p in properties]
columns_expected = [f"{p}_expected" for p in properties]
df_predicted = pd.DataFrame(predicted, columns=columns_predicted)
df_expected = pd.DataFrame(expected, columns=columns_expected)
data = pd.concat((df_predicted, df_expected), axis=1)
# Number of features
nfeatures = predicted.shape[1]
# Create a subplot with at most 3 features per line
rows = (nfeatures // 3) + (0 if nfeatures % 3 == 0 else 1)
ncols = nfeatures if nfeatures < 3 else 3
_, axis = plt.subplots(nrows=rows, ncols=ncols, figsize=(20, 20), constrained_layout=True)
# fig.tight_layout()
if rows == 1:
axis = [axis]
for row, labels in enumerate(chunks_of(list(zip(columns_expected, columns_predicted)), 3)):
for col, (label_x, label_y) in enumerate(labels):
ax = axis[row][col] if nfeatures > 1 else axis[0]
sns.regplot(x=label_x, y=label_y, data=data, ax=ax)
path = Path(".") / f"{output_name}.png"
plt.savefig(path)
print(f"{'name':40} slope intercept rvalue stderr")
for k, name in enumerate(properties):
# Print linear regression result
reg = stats.linregress(predicted[:, k], expected[:, k])
print(f"{name:40} {reg.slope:.3f} {reg.intercept:.3f} {reg.rvalue:.3f} {reg.stderr:.3e}")
def chunks_of(data: List[Any], n: int) -> Iterator[Any]:
"""Return chunks of ``n`` from ``data``."""
for i in range(0, len(data), n):
yield data[i:i + n]
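if __name__ == "__main__":
    # Minimal demo of create_scatter_plot on synthetic data; the property names and
    # array sizes below are made up for illustration and are not part of the swan package.
    rng = np.random.default_rng(0)
    fake_expected = rng.normal(size=(50, 2))
    fake_predicted = fake_expected + rng.normal(scale=0.1, size=(50, 2))
    create_scatter_plot(fake_predicted, fake_expected, ["prop_a", "prop_b"],
                        output_name="demo_scatterplot")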
| [
"[email protected]"
] | |
224bd1bc8c27e6c4399377783026c3ecac538f61 | 06813e2a80e35fad2abc334f291ca201dd10a762 | /torchTools/detectionv2/loss/centerLoss.py | 5a26f9c5ca9c6fe143696fa1ea767cabce9d00f9 | [] | no_license | wucng/torchTools | 0f912fbdc60a1373e26c6a4775d6b7be934b9f4f | fcabdbc4de4217addd87534c2ef8272e47958581 | refs/heads/master | 2021-05-21T08:46:24.096012 | 2020-06-10T02:02:46 | 2020-06-10T02:02:46 | 252,623,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,047 | py | """
# 结合SSD的思想
# yolov2,v3内置的先念框,以网格左上角为中心,预设置的w,h作为先念框的大小
# 统一都缩减到输入 图像上
# 在结合SSD的方式筛选正负样本(根据IOU计算)
try:
from .boxestool import batched_nms
except:
from boxestool import batched_nms
"""
from .nms_pytorch import nms,nms2
from torch import nn
import torch
from torch.nn import functional as F
import random
import numpy as np
from .focalLoss import smooth_l1_loss_jit,giou_loss_jit,\
sigmoid_focal_loss_jit,softmax_focal_loss_jit
# import math
from math import sqrt
class CenterLoss(nn.Module):
def __init__(self,cfg,device="cpu"):
super(CenterLoss, self).__init__()
self.device = device
self.num_anchors = cfg["network"]["RPN"]["num_boxes"]
self.num_classes = cfg["network"]["RPN"]["num_classes"]
self.threshold_conf = cfg["work"]["loss"]["threshold_conf"]
self.threshold_cls = cfg["work"]["loss"]["threshold_cls"]
self.conf_thres = cfg["work"]["loss"]["conf_thres"]
self.nms_thres = cfg["work"]["loss"]["nms_thres"]
self.filter_labels = cfg["work"]["train"]["filter_labels"]
self.useFocal = cfg["work"]["loss"]["useFocal"]
self.alpha = cfg["work"]["loss"]["alpha"]
self.gamma = cfg["work"]["loss"]["gamma"]
self.strides = cfg["network"]["backbone"]["strides"]
self.resize = cfg["work"]["train"]["resize"]
def forward(self,preds,targets):
if "boxes" not in targets[0]:
# return self.predict(preds,targets)
results = self.predict(preds,targets)
results = [self.apply_nms(result) for result in results]
return results
else:
return self.compute_loss(preds, targets)
def compute_loss(self,preds_list, targets_origin):
"""
:param preds:
            if mulScale: # multi-scale case (two feature maps as an example, batch=2)
preds=[[(1,28,28,12),(1,14,14,12)],[(1,28,28,12),(1,14,14,12)]]
            else: # single-scale case (two feature maps as an example, batch=2)
preds=[(2,28,28,12),(2,14,14,12)]
:param targets:
[{"boxes":(n,4),"labels":(n,)},{"boxes":(m,4),"labels":(m,)}]
:return:
"""
losses = {
"loss_conf": 0,
"loss_no_conf": 0,
"loss_box": 0,
"loss_clf": 0,
"loss_no_clf": 0,
"loss_iou": 0
}
# normalize
targets = self.normalize(targets_origin)
pred_cls,pred_box = preds_list
index = targets[..., 4] == 1
no_index = targets[..., 4] != 1
conf = pred_cls[index][...,0]
no_conf = pred_cls[no_index][...,0]
box = pred_box[index]
cls = pred_cls[index][...,1:]
no_cls = pred_cls[no_index][...,1:]
tbox = targets[index][...,:4]
tcls = targets[index][...,5:]
loss_conf = sigmoid_focal_loss_jit(conf, torch.ones_like(conf).detach(),
self.alpha, self.gamma, reduction="sum")
loss_no_conf = sigmoid_focal_loss_jit(no_conf, torch.zeros_like(no_conf).detach(),
self.alpha, self.gamma, reduction="sum")
loss_box = smooth_l1_loss_jit(torch.sigmoid(box), tbox.detach(), 2e-5, reduction="sum")
loss_clf = sigmoid_focal_loss_jit(cls, tcls.detach(),
self.alpha, self.gamma, reduction="sum")
loss_no_clf = F.mse_loss(torch.sigmoid(no_cls), torch.zeros_like(no_cls).detach(),
reduction="sum")
# iou loss
loss_iou = giou_loss_jit(xywh2x1y1x2y2(torch.sigmoid(box)),xywh2x1y1x2y2(tbox).detach(), reduction="sum")
losses["loss_conf"] += loss_conf
losses["loss_no_conf"] += loss_no_conf * 0.05 # 0.05
losses["loss_box"] += loss_box # 50
losses["loss_clf"] += loss_clf
losses["loss_no_clf"] += loss_no_clf * 0.05
losses["loss_iou"] += loss_iou * 10.
return losses
def normalize(self,targets):
last_result = []
for target in targets:
result_list = []
h, w = target["resize"]
boxes = target["boxes"]
labels = target["labels"]
for stride in self.strides:
grid_ceil_h, grid_ceil_w = h//stride,w//stride
strides_h,strides_w=stride,stride
result = torch.zeros([1, grid_ceil_h, grid_ceil_w,
self.num_anchors, 5 + self.num_classes],
dtype=torch.float32,
device=self.device)
idx = 0
# x1,y1,x2,y2->x0,y0,w,h
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
# [x0,y0,w,h]
x0 = (x1 + x2) / 2.
y0 = (y1 + y2) / 2.
w_b = (x2 - x1) / w # 0~1
h_b = (y2 - y1) / h # 0~1
                # determine which grid cell the box centre falls into
                # (take the top-left coordinate of that grid cell)
                grid_ceil = ((x0 / strides_w).int(), (y0 / strides_h).int())
                # normalize to 0~1
                # (gt_box centre - matching grid cell top-left) / grid size would map the value into 0~1
# x0 = (x0 - grid_ceil[0].float() * strides_w) / w
# y0 = (y0 - grid_ceil[1].float() * strides_h) / h
x0 /= w
y0 /= h
for i, (y, x) in enumerate(zip(grid_ceil[1], grid_ceil[0])):
result[idx, y, x, :,0] = x0[i]
result[idx, y, x, :,1] = y0[i]
result[idx, y, x, :,2] = w_b[i]
result[idx, y, x, :,3] = h_b[i]
                    result[idx, y, x, :,4] = 1 # confidence / objectness
                    result[idx, y, x, :,5+int(labels[i])] = 1 # one-hot encode the class label
result_list.append(result.view(1,-1,5 + self.num_classes))
last_result.append(torch.cat(result_list,1))
return torch.cat(last_result,0)
def predict(self, preds_list,targets_origin):
"""
:param preds_list:
        # (two feature maps as an example, batch=2)
preds_list=[(2,28,28,12),(2,14,14,12)]
:param targets_origin:
[{"resize":(h,w),"origin_size":(h,w)},{"resize":(h,w),"origin_size":(h,w)}]
:return:
"""
pred_clses, pred_boxes = preds_list
pred_clses, pred_boxes = torch.sigmoid(pred_clses),torch.sigmoid(pred_boxes)
pred_boxes = self.reverse_normalize(pred_boxes)
result = []
for i,(pred_cls,pred_box) in enumerate(zip(pred_clses,pred_boxes)):
confidence = pred_cls[...,0]
pred_cls = pred_cls[...,1:]
condition = (pred_cls * confidence).max(dim=0)[0] > self.threshold_conf
keep = torch.nonzero(condition).squeeze(1)
if len(keep)==0:
pred_box = torch.zeros([1,4],dtype=pred_box.dtype,device=pred_box.device)
scores = torch.zeros([1,],dtype=pred_box.dtype,device=pred_box.device)
labels = torch.zeros([1,],dtype=torch.long,device=pred_box.device)
confidence = torch.zeros([1,],dtype=pred_box.dtype,device=pred_box.device)
else:
pred_box = pred_box[keep]
pred_cls = pred_cls[keep]
confidence = confidence[keep]
# labels and scores
# scores, labels = torch.softmax(pred_cls, -1).max(dim=1)
scores, labels = pred_cls.max(dim=1)
                # filter out detections with low classification scores
# keep = torch.nonzero(scores > self.threshold_cls).squeeze(1)
keep = torch.nonzero(scores > self.threshold_cls).squeeze(1)
if len(keep)==0:
pred_box = torch.zeros([1, 4], dtype=pred_box.dtype, device=pred_box.device)
scores = torch.zeros([1, ], dtype=pred_box.dtype, device=pred_box.device)
labels = torch.zeros([1, ], dtype=torch.long, device=pred_box.device)
confidence = torch.zeros([1, ], dtype=pred_box.dtype, device=pred_box.device)
else:
pred_box, scores, labels, confidence = pred_box[keep], scores[keep], labels[keep], confidence[keep]
result.append({"boxes": pred_box, "scores": scores, "labels": labels, "confidence": confidence})
result[i].update(targets_origin[i])
return result
def reverse_normalize(self,pboxes):
# [x0,y0,w,h]-->normalize 0~1--->[x1,y1,x2,y2]
bs = pboxes.size(0)
pboxes = pboxes.view(bs,-1,self.num_anchors,4)
for i,cboxes in enumerate(pboxes):
index = 0
h, w = self.resize
for stride in self.strides:
strides_h, strides_w = stride, stride
h_f,w_f = h//strides_h, w//strides_w
boxes = cboxes[index:index+h_f*w_f,...]
                # to grid (x, y) layout
# temp = torch.arange(0, len(boxes))
# grid_y = temp // w_f
# grid_x = temp - grid_y * w_f
for j in range(self.num_anchors):
x0 = boxes[:,j, 0] * w #+ (grid_x * strides_w).float().to(self.device)
y0 = boxes[:,j, 1] * h #+ (grid_y * strides_h).float().to(self.device)
w_b = boxes[:,j, 2] * w
h_b = boxes[:,j, 3] * h
x1 = x0 - w_b / 2.
y1 = y0 - h_b / 2.
x2 = x0 + w_b / 2.
y2 = y0 + h_b / 2.
                    # clip to the resized image bounds
x1 = x1.clamp(0,w)
x2 = x2.clamp(0,w)
y1 = y1.clamp(0,h)
y2 = y2.clamp(0,h)
boxes[:, j, 0] = x1
boxes[:, j, 1] = y1
boxes[:, j, 2] = x2
boxes[:, j, 3] = y2
pboxes[i,index:index+h_f*w_f,...] = boxes
index += h_f*w_f
return pboxes.view(bs,-1,4)
def apply_nms(self,prediction):
# for idx,prediction in enumerate(detections):
# 1.先按scores过滤分数低的,过滤掉分数小于conf_thres
ms = prediction["scores"] > self.conf_thres
if torch.sum(ms) == 0:
return None
else:
last_scores = []
last_labels = []
last_boxes = []
            # 2. run NMS within each class: if IoU exceeds nms_thres keep only the highest-scoring box, otherwise keep both
            # filter by the score threshold
scores = prediction["scores"][ms]
labels = prediction["labels"][ms]
boxes = prediction["boxes"][ms]
unique_labels = labels.unique()
for c in unique_labels:
if c in self.filter_labels: continue
# Get the detections with the particular class
temp = labels == c
_scores = scores[temp]
_labels = labels[temp]
_boxes = boxes[temp]
if len(_labels) > 1:
# Sort the detections by maximum objectness confidence
# _, conf_sort_index = torch.sort(_scores, descending=True)
# _scores=_scores[conf_sort_index]
# _boxes=_boxes[conf_sort_index]
# """
# keep=py_cpu_nms(_boxes.cpu().numpy(),_scores.cpu().numpy(),self.nms_thres)
keep = nms2(_boxes, _scores, self.nms_thres)
# keep = batched_nms(_boxes, _scores, _labels, self.nms_thres)
last_scores.extend(_scores[keep])
last_labels.extend(_labels[keep])
last_boxes.extend(_boxes[keep])
else:
last_scores.extend(_scores)
last_labels.extend(_labels)
last_boxes.extend(_boxes)
if len(last_labels)==0:
return None
            # map the boxes back onto the original image
h_ori,w_ori = prediction["original_size"]
h_re,w_re = prediction["resize"]
h_ori = h_ori.float()
w_ori = w_ori.float()
            # scale up to the padded image
if h_ori > w_ori:
h_scale = h_ori/h_re
w_scale = h_ori/w_re
                # remove the padding offset
diff = h_ori - w_ori
for i in range(len(last_boxes)):
last_boxes[i][[0,2]]*=w_scale
last_boxes[i][[1,3]]*=h_scale
last_boxes[i][0] -= diff // 2
last_boxes[i][2] -= diff-diff // 2
else:
h_scale = w_ori / h_re
w_scale = w_ori / w_re
diff = w_ori - h_ori
for i in range(len(last_boxes)):
last_boxes[i][[0,2]]*=w_scale
last_boxes[i][[1,3]]*=h_scale
last_boxes[i][1] -= diff // 2
last_boxes[i][3] -= diff - diff // 2
return {"scores": last_scores, "labels": last_labels, "boxes": last_boxes}
def box_area(boxes):
"""
Computes the area of a set of bounding boxes, which are specified by its
(x0, y0, x1, y1) coordinates.
Arguments:
boxes (Tensor[N, 4]): boxes for which the area will be computed. They
are expected to be in (x0, y0, x1, y1) format
Returns:
area (Tensor[N]): area for each box
"""
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py
# with slight modifications
def box_iou(boxes1, boxes2):
"""
Return intersection-over-union (Jaccard index) of boxes.
Arguments:
boxes1 (Tensor[N, 4])
boxes2 (Tensor[M, 4])
Returns:
iou (Tensor[N, M]): the NxM matrix containing the pairwise
IoU values for every element in boxes1 and boxes2
"""
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
iou = inter / (area1[:, None] + area2 - inter)
return iou
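# Quick sanity check (illustrative numbers, not from the original source): two unit
# squares offset by (0.5, 0.5) intersect in an area of 0.25, so
#   box_iou(torch.tensor([[0., 0., 1., 1.]]), torch.tensor([[0.5, 0.5, 1.5, 1.5]]))
# returns roughly 0.25 / (1 + 1 - 0.25) = 0.1429.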
def xywh2x1y1x2y2(boxes):
"""
:param boxes: [...,4]
:return:
"""
x1y1=boxes[...,:2]-boxes[...,2:]/2
x2y2=boxes[...,:2]+boxes[...,2:]/2
return torch.cat((x1y1,x2y2),-1)
def x1y1x2y22xywh(boxes):
"""
:param boxes: [...,4]
:return:
"""
xy=(boxes[...,:2]+boxes[...,2:])/2
wh=boxes[...,2:]-boxes[...,:2]
return torch.cat((xy,wh),-1) | [
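if __name__ == "__main__":
    # Round-trip sanity check for the two box-format helpers (illustrative only):
    # corners -> centre/size -> corners should be the identity.
    _b = torch.tensor([[10., 20., 30., 60.]])
    assert torch.allclose(xywh2x1y1x2y2(x1y1x2y22xywh(_b)), _b)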
"[email protected]"
] | |
aa6b4b76ac5f339ebfaaa0326ce457417d479c95 | fb8cbebdf034b2f478943752d5443afc82c6eef5 | /tuirer/venv/lib/python3.6/site-packages/IPython/testing/tests/test_decorators.py | be8bb30f24658acafa1b8a18bf0f3b6ce66ddae9 | [] | no_license | fariasjr/CitiTuirer | f64e0ec93ef088f8140bb0961d2ad4ed3b59448a | deb3f7a9c2d45b8a7f54639037f097b99abdac11 | refs/heads/master | 2020-03-24T05:10:36.261050 | 2018-08-01T20:24:30 | 2018-08-01T20:24:30 | 142,477,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,903 | py | """Tests for the decorators we've created for IPython.
"""
# Module imports
# Std lib
import inspect
import sys
from IPython.testing import decorators as dec
# Third party
import nose.tools as nt
#-----------------------------------------------------------------------------
# Utilities
# Note: copied from OInspect, kept here so the testing stuff doesn't create
# circular dependencies and is easier to reuse.
def getargspec(obj):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
Modified version of inspect.getargspec from the Python Standard
Library."""
if inspect.isfunction(obj):
func_obj = obj
elif inspect.ismethod(obj):
func_obj = obj.__func__
else:
raise TypeError('arg is not a Python function')
args, varargs, varkw = inspect.getargs(func_obj.__code__)
return args, varargs, varkw, func_obj.__defaults__
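# For example, getargspec(doctest_bad) (defined below) returns
# (['x', 'y'], None, 'k', (1,)), which is exactly the tuple that
# test_skip_dt_decorator2 hardcodes as its expected answer.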
#-----------------------------------------------------------------------------
# Testing functions
@dec.as_unittest
def trivial():
"""A trivial test"""
pass
@dec.skip
def test_deliberately_broken():
"""A deliberately broken test - we want to skip this one."""
1/0
@dec.skip('Testing the skip decorator')
def test_deliberately_broken2():
"""Another deliberately broken test - we want to skip this one."""
1/0
# Verify that we can correctly skip the doctest for a function at will, but
# that the docstring itself is NOT destroyed by the decorator.
def doctest_bad(x,y=1,**k):
"""A function whose doctest we need to skip.
>>> 1+1
3
"""
print('x:',x)
print('y:',y)
print('k:',k)
def call_doctest_bad():
"""Check that we can still call the decorated functions.
>>> doctest_bad(3,y=4)
x: 3
y: 4
k: {}
"""
pass
def test_skip_dt_decorator():
"""Doctest-skipping decorator should preserve the docstring.
"""
# Careful: 'check' must be a *verbatim* copy of the doctest_bad docstring!
check = """A function whose doctest we need to skip.
>>> 1+1
3
"""
# Fetch the docstring from doctest_bad after decoration.
val = doctest_bad.__doc__
nt.assert_equal(check,val,"doctest_bad docstrings don't match")
# Doctest skipping should work for class methods too
class FooClass(object):
"""FooClass
Example:
>>> 1+1
2
"""
def __init__(self,x):
"""Make a FooClass.
Example:
>>> f = FooClass(3)
junk
"""
print('Making a FooClass.')
self.x = x
def bar(self,y):
"""Example:
>>> ff = FooClass(3)
>>> ff.bar(0)
boom!
>>> 1/0
bam!
"""
return 1/y
def baz(self,y):
"""Example:
>>> ff2 = FooClass(3)
Making a FooClass.
>>> ff2.baz(3)
True
"""
return self.x==y
def test_skip_dt_decorator2():
"""Doctest-skipping decorator should preserve function signature.
"""
# Hardcoded correct answer
dtargs = (['x', 'y'], None, 'k', (1,))
# Introspect out the value
dtargsr = getargspec(doctest_bad)
assert dtargsr==dtargs, \
"Incorrectly reconstructed args for doctest_bad: %s" % (dtargsr,)
@dec.skip_linux
def test_linux():
nt.assert_false(sys.platform.startswith('linux'),"This test can't run under linux")
@dec.skip_win32
def test_win32():
nt.assert_not_equal(sys.platform,'win32',"This test can't run under windows")
@dec.skip_osx
def test_osx():
nt.assert_not_equal(sys.platform,'darwin',"This test can't run under osx")
| [
"[email protected]"
] | |
c93b63bb6a2ee9bde97722c20735dd3e19260a1f | 2be3ad1c6413c9eb74819773e82c9cfe973f2fbb | /mama_cas/cas.py | 2057a703a19b10904887a217008626fee552bf60 | [
"BSD-3-Clause"
] | permissive | ilCapo77/django-mama-cas | 271d706eae24ea87c9280e46e5ddcfb1fd5c3d7f | 42ec942c207e5c4e7e40d7bb74551682c2803d81 | refs/heads/master | 2020-12-28T04:49:05.463591 | 2016-08-22T16:41:14 | 2016-08-22T16:41:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,549 | py | import logging
from django.contrib import messages
from django.contrib.auth import logout
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from mama_cas.compat import is_authenticated
from mama_cas.exceptions import InvalidTicketSpec
from mama_cas.exceptions import ValidationError
from mama_cas.models import ServiceTicket
from mama_cas.models import ProxyTicket
from mama_cas.models import ProxyGrantingTicket
from mama_cas.services import get_callbacks
logger = logging.getLogger(__name__)
def validate_service_ticket(service, ticket, pgturl, renew=False, require_https=False):
"""
Validate a service ticket string. Return a triplet containing a
``ServiceTicket`` and an optional ``ProxyGrantingTicket``, or a
``ValidationError`` if ticket validation failed.
"""
logger.debug("Service validation request received for %s" % ticket)
# Check for proxy tickets passed to /serviceValidate
if ticket and ticket.startswith(ProxyTicket.TICKET_PREFIX):
e = InvalidTicketSpec('Proxy tickets cannot be validated with /serviceValidate')
logger.warning("%s %s" % (e.code, e))
return None, None, e
try:
st = ServiceTicket.objects.validate_ticket(ticket, service, renew=renew, require_https=require_https)
except ValidationError as e:
logger.warning("%s %s" % (e.code, e))
return None, None, e
else:
if pgturl:
logger.debug("Proxy-granting ticket request received for %s" % pgturl)
pgt = ProxyGrantingTicket.objects.create_ticket(service, pgturl, user=st.user, granted_by_st=st)
else:
pgt = None
return st, pgt, None
def validate_proxy_ticket(service, ticket, pgturl):
"""
Validate a proxy ticket string. Return a 4-tuple containing a
``ProxyTicket``, an optional ``ProxyGrantingTicket`` and a list
of proxies through which authentication proceeded, or a
``ValidationError`` if ticket validation failed.
"""
logger.debug("Proxy validation request received for %s" % ticket)
try:
pt = ProxyTicket.objects.validate_ticket(ticket, service)
except ValidationError as e:
logger.warning("%s %s" % (e.code, e))
return None, None, None, e
else:
# Build a list of all services that proxied authentication,
# in reverse order of which they were traversed
proxies = [pt.service]
prior_pt = pt.granted_by_pgt.granted_by_pt
while prior_pt:
proxies.append(prior_pt.service)
prior_pt = prior_pt.granted_by_pgt.granted_by_pt
if pgturl:
logger.debug("Proxy-granting ticket request received for %s" %
pgturl)
pgt = ProxyGrantingTicket.objects.create_ticket(service, pgturl, user=pt.user, granted_by_pt=pt)
else:
pgt = None
return pt, pgt, proxies, None
def validate_proxy_granting_ticket(pgt, target_service):
"""
Validate a proxy granting ticket string. Return an ordered pair
containing a ``ProxyTicket``, or a ``ValidationError`` if ticket
validation failed.
"""
logger.debug("Proxy ticket request received for %s using %s" % (target_service, pgt))
try:
pgt = ProxyGrantingTicket.objects.validate_ticket(pgt, target_service)
except ValidationError as e:
logger.warning("%s %s" % (e.code, e))
return None, e
else:
pt = ProxyTicket.objects.create_ticket(service=target_service, user=pgt.user, granted_by_pgt=pgt)
return pt, None
def get_attributes(user, service):
"""
Return a dictionary of user attributes from the set of configured
callback functions.
"""
attributes = {}
for path in get_callbacks(service):
callback = import_string(path)
attributes.update(callback(user, service))
return attributes
def logout_user(request):
"""End a single sign-on session for the current user."""
logger.debug("Logout request received for %s" % request.user)
if is_authenticated(request.user):
ServiceTicket.objects.consume_tickets(request.user)
ProxyTicket.objects.consume_tickets(request.user)
ProxyGrantingTicket.objects.consume_tickets(request.user)
ServiceTicket.objects.request_sign_out(request.user)
logger.info("Single sign-on session ended for %s" % request.user)
logout(request)
messages.success(request, _('You have been successfully logged out'))
| [
"[email protected]"
] | |
6d7ad8317951def58ca9c5faab64df8b4f880b25 | ef54d37f8a3303013ca7469871a320d303957ed7 | /robo4.2/fusion/tests/RIST/API/Deprecated/OVF2458_Q2_NFR_Ring_Backup_Restore/Daisy_Data.py | 5ee611da9574225b303821b27cf89552a579824b | [] | no_license | richa92/Jenkin_Regression_Testing | d18badfcf16bda682dfe7bcbbd66f54a9a27a58d | 24a74926170cbdfafa47e972644e2fe5b627d8ff | refs/heads/master | 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,409 | py | admin_credentials = {'userName': 'Administrator', 'password': 'Wpst@hpvse123#!'}
# Existing Resources from previous test\
ENC1 = "CN754404R9"
ENC2 = "CN754406WS"
ENC3 = "CN754406WB"
Bay = "5"
SH = ENC2 + ", bay " + Bay
LIG_Exist = "LIG1"
EG_Exist = "EG1"
# Created "extra" resources
LIG_NAME = "BR_LIG"
EG_NAME = "BR_EG"
# Used after backup created
Post_backup_ENC = ENC3
Post_backup_LIG_NAME = "Post_backup_BR_LIG"
Post_backup_EG_NAME = "Post_backup_BR_EG"
Post_backup_SH = Post_backup_ENC + ", bay " + Bay
# Ethernet Networks
NET1 = 'net_100'
NET2 = 'net_300'
BR_ethernet_networks = [
{'name': 'BR_Network1',
'type': 'ethernet-networkV4',
'vlanId': 0,
'subnetUri': None,
'purpose': 'General',
'smartLink': True,
'privateNetwork': False,
'connectionTemplateUri': None,
'ethernetNetworkType': 'Tunnel'},
{'name': 'BR_Network2',
'type': 'ethernet-networkV4',
'vlanId': 1,
'purpose': 'General',
'smartLink': True,
'privateNetwork': False,
'connectionTemplateUri': None,
'ethernetNetworkType': 'Untagged'},
{'name': 'BR_Network3',
'type': 'ethernet-networkV4',
'vlanId': 3,
'purpose': 'General',
'smartLink': True,
'privateNetwork': False,
'connectionTemplateUri': None,
'ethernetNetworkType': 'Tagged'},
{'name': 'BR_Network4',
'type': 'ethernet-networkV4',
'vlanId': 4,
'purpose': 'General',
'smartLink': True,
'privateNetwork': False,
'connectionTemplateUri': None,
'ethernetNetworkType': 'Tagged'}
]
icmap = [{'bay': 3, 'enclosure': 1, 'type': 'Virtual Connect SE 40Gb F8 Module for Synergy', 'enclosureIndex': 1},
{'bay': 6, 'enclosure': 1, 'type': 'Virtual Connect SE 40Gb F8 Module for Synergy', 'enclosureIndex': 1}
]
BR_lig = [{
"enclosureIndexes": [
1
],
"description": None,
"telemetryConfiguration": {
"description": None,
"sampleCount": 12,
"enableTelemetry": True,
"sampleInterval": 300,
"type": "telemetry-configuration",
"name": None
},
"internalNetworkUris": [],
"name": LIG_NAME,
"uplinkSets": [
{
"networkUris": [
NET1
],
"ethernetNetworkType": "Tagged",
"name": "ULS1",
"lacpTimer": "Short",
"primaryPort": None,
"nativeNetworkUri": None,
"mode": "Auto",
"networkType": "Ethernet",
"logicalPortConfigInfos": [{'enclosure': '1', 'bay': '3', 'port': 'Q1.1', 'speed': 'Auto'}]
},
{
"networkUris": [
NET2
],
"ethernetNetworkType": "Tagged",
"name": "ULS2",
"lacpTimer": "Short",
"primaryPort": None,
"nativeNetworkUri": None,
"mode": "Auto",
"networkType": "Ethernet",
"logicalPortConfigInfos": [{'enclosure': '1', 'bay': '6', 'port': 'Q2.1', 'speed': 'Auto'}]
}
],
"redundancyType": "Redundant",
"enclosureType": "SY12000",
"fabricUri": "Fabric:DefaultFabric",
"qosConfiguration": {
"name": None,
"type": "qos-aggregated-configuration",
"inactiveFCoEQosConfig": None,
"inactiveNonFCoEQosConfig": None,
"activeQosConfig": {
"description": None,
"configType": "Passthrough",
"downlinkClassificationType": None,
"uplinkClassificationType": None,
"qosTrafficClassifiers": [],
"type": "QosConfiguration",
"name": None
},
"description": None
},
"type": "logical-interconnect-groupV4",
"interconnectMapTemplate": icmap,
"ethernetSettings": {
"interconnectType": "Ethernet",
"igmpIdleTimeoutInterval": 260,
"macRefreshInterval": 5,
"description": None,
"enableTaggedLldp": False,
"enableRichTLV": False,
"enableNetworkLoopProtection": True,
"enableFastMacCacheFailover": True,
"lldpIpv4Address": "",
"enableIgmpSnooping": False,
"enablePauseFloodProtection": True,
"type": "EthernetInterconnectSettingsV4",
"lldpIpv6Address": "",
"name": "name2003202772-1470153927401"
},
"stackingHealth": None,
"stackingMode": None,
"interconnectBaySet": 3,
"snmpConfiguration": {
"description": None,
"readCommunity": "public",
"enabled": True,
"systemContact": "",
"snmpAccess": [],
"trapDestinations": [],
"type": "snmp-configuration",
"name": None
}
}]
BR_lig_validate = {
"uplinkSets": [
{
"networkUris": [
"ETH:" + NET2
],
"ethernetNetworkType": "Tagged",
"name": "ULS2",
"lacpTimer": "Short",
"primaryPort": None,
"nativeNetworkUri": None,
"reachability": None,
"mode": "Auto",
"networkType": "Ethernet",
"logicalPortConfigInfos": [
{
"desiredSpeed": "Auto",
"logicalLocation": {
"locationEntries": [
{
"type": "Port",
"relativeValue": 67
},
{
"type": "Enclosure",
"relativeValue": 1
},
{
"type": "Bay",
"relativeValue": 6
}
]
}
}
]
},
{
"networkUris": [
"ETH:" + NET1
],
"ethernetNetworkType": "Tagged",
"name": "ULS1",
"lacpTimer": "Short",
"primaryPort": None,
"nativeNetworkUri": None,
"reachability": None,
"mode": "Auto",
"networkType": "Ethernet",
"logicalPortConfigInfos": [
{
"desiredSpeed": "Auto",
"logicalLocation": {
"locationEntries": [
{
"type": "Port",
"relativeValue": 62
},
{
"type": "Enclosure",
"relativeValue": 1
},
{
"type": "Bay",
"relativeValue": 3
}
]
}
}
]
}
],
"stackingHealth": None,
"interconnectBaySet": 3,
"snmpConfiguration": {
"status": None,
"category": "snmp-configuration",
"description": None,
"readCommunity": "public",
"enabled": True,
"uri": None,
"systemContact": "",
"state": None,
"snmpAccess": [],
"trapDestinations": [],
"type": "snmp-configuration",
"name": None
},
"category": "logical-interconnect-groups",
"internalNetworkUris": [],
"state": "Active",
"qosConfiguration": {
"status": None,
"category": "qos-aggregated-configuration",
"inactiveFCoEQosConfig": None,
"name": None,
"activeQosConfig": {
"status": None,
"category": "qos-aggregated-configuration",
"description": None,
"uri": None,
"configType": "Passthrough",
"state": None,
"downlinkClassificationType": None,
"uplinkClassificationType": None,
"qosTrafficClassifiers": [],
"type": "QosConfiguration",
"name": None
},
"uri": None,
"state": None,
"inactiveNonFCoEQosConfig": None,
"type": "qos-aggregated-configuration",
"description": None
},
"interconnectMapTemplate": {
"interconnectMapEntryTemplates": [
{
"logicalLocation": {
"locationEntries": [
{
"type": "Enclosure",
"relativeValue": 1
},
{
"type": "Bay",
"relativeValue": 6
}
]
},
"permittedInterconnectTypeUri": "ICTypes:Virtual Connect SE 40Gb F8 Module for Synergy",
"enclosureIndex": 1
},
{
"logicalLocation": {
"locationEntries": [
{
"type": "Enclosure",
"relativeValue": 1
},
{
"type": "Bay",
"relativeValue": 3
}
]
},
"permittedInterconnectTypeUri": "ICTypes:Virtual Connect SE 40Gb F8 Module for Synergy",
"enclosureIndex": 1
}
]
},
"type": "logical-interconnect-groupV4",
"status": None,
"enclosureIndexes": [
1
],
"description": None,
"telemetryConfiguration": {
"status": None,
"category": "telemetry-configuration",
"description": None,
"sampleCount": 12,
"enableTelemetry": True,
"uri": None,
"state": None,
"sampleInterval": 300,
"type": "telemetry-configuration",
"name": None
},
"ethernetSettings": {
"category": None,
"status": None,
"igmpIdleTimeoutInterval": 260,
"macRefreshInterval": 5,
"description": None,
"enableTaggedLldp": False,
"enableRichTLV": False,
"enableNetworkLoopProtection": True,
"enableFastMacCacheFailover": True,
"lldpIpv4Address": "",
"enableIgmpSnooping": False,
"state": None,
"enablePauseFloodProtection": True,
"dependentResourceUri": "LIG:BR_LIG",
"interconnectType": "Ethernet",
"type": "EthernetInterconnectSettingsV4",
"lldpIpv6Address": ""
},
"stackingMode": None,
"name": "BR_LIG",
"uri": "LIG:BR_LIG",
"redundancyType": "Redundant",
"enclosureType": "SY12000",
"fabricUri": "Fabric:DefaultFabric"
}
BR_eg = [{
"osDeploymentSettings": {
"deploymentModeSettings": {
"deploymentNetworkUri": None,
"deploymentMode": "None"
},
"manageOSDeployment": False
},
"powerMode": "RedundantPowerFeed",
"interconnectBayMappings": [
{
"logicalInterconnectGroupUri": "LIG:" + LIG_NAME,
"interconnectBay": 3
},
{
"logicalInterconnectGroupUri": "LIG:" + LIG_NAME,
"interconnectBay": 6
}
],
"enclosureCount": 1,
"ipAddressingMode": "DHCP",
"ipRangeUris": [],
"name": EG_NAME
}]
Post_backup_ethernet_networks = [
{'name': 'POST_BR_Network111',
'type': 'ethernet-networkV4',
'vlanId': 111,
'subnetUri': None,
'purpose': 'General',
'smartLink': True,
'privateNetwork': False,
'connectionTemplateUri': None,
'ethernetNetworkType': 'Tagged'},
{'name': 'POST_BR_Network222',
'type': 'ethernet-networkV4',
'vlanId': 222,
'purpose': 'General',
'smartLink': True,
'privateNetwork': False,
'connectionTemplateUri': None,
'ethernetNetworkType': 'Tagged'},
{'name': 'POST_BR_Network333',
'type': 'ethernet-networkV4',
'vlanId': 333,
'purpose': 'General',
'smartLink': True,
'privateNetwork': False,
'connectionTemplateUri': None,
'ethernetNetworkType': 'Tagged'},
{'name': 'POST_BR_Network444',
'type': 'ethernet-networkV4',
'vlanId': 444,
'purpose': 'General',
'smartLink': True,
'privateNetwork': False,
'connectionTemplateUri': None,
'ethernetNetworkType': 'Tagged'},
]
icmap = [{'bay': 3, 'enclosure': 1, 'type': 'Virtual Connect SE 40Gb F8 Module for Synergy', 'enclosureIndex': 1},
{'bay': 6, 'enclosure': 1, 'type': 'Virtual Connect SE 40Gb F8 Module for Synergy', 'enclosureIndex': 1}
]
Post_backup_lig = [{
"enclosureIndexes": [
1
],
"description": None,
"telemetryConfiguration": {
"description": None,
"sampleCount": 12,
"enableTelemetry": True,
"sampleInterval": 300,
"type": "telemetry-configuration",
"name": None
},
"internalNetworkUris": [],
"name": Post_backup_LIG_NAME,
"uplinkSets": [
{
"networkUris": [
NET1
],
"ethernetNetworkType": "Tagged",
"name": "ULS1",
"lacpTimer": "Short",
"primaryPort": None,
"nativeNetworkUri": None,
"mode": "Auto",
"networkType": "Ethernet",
"logicalPortConfigInfos": [{'enclosure': '1', 'bay': '3', 'port': 'Q1.1', 'speed': 'Auto'}]
},
{
"networkUris": [
NET2
],
"ethernetNetworkType": "Tagged",
"name": "ULS2",
"lacpTimer": "Short",
"primaryPort": None,
"nativeNetworkUri": None,
"mode": "Auto",
"networkType": "Ethernet",
"logicalPortConfigInfos": [{'enclosure': '1', 'bay': '6', 'port': 'Q2.1', 'speed': 'Auto'}]
}
],
"redundancyType": "Redundant",
"enclosureType": "SY12000",
"fabricUri": "Fabric:DefaultFabric",
"qosConfiguration": {
"name": None,
"type": "qos-aggregated-configuration",
"inactiveFCoEQosConfig": None,
"inactiveNonFCoEQosConfig": None,
"activeQosConfig": {
"description": None,
"configType": "Passthrough",
"downlinkClassificationType": None,
"uplinkClassificationType": None,
"qosTrafficClassifiers": [],
"type": "QosConfiguration",
"name": None
},
"description": None
},
"type": "logical-interconnect-groupV4",
"interconnectMapTemplate": icmap,
"ethernetSettings": {
"interconnectType": "Ethernet",
"igmpIdleTimeoutInterval": 260,
"macRefreshInterval": 5,
"description": None,
"enableTaggedLldp": False,
"enableRichTLV": False,
"enableNetworkLoopProtection": True,
"enableFastMacCacheFailover": True,
"lldpIpv4Address": "",
"enableIgmpSnooping": False,
"enablePauseFloodProtection": True,
"dependentResourceUri": "LIG:LIG_NAME",
"type": "EthernetInterconnectSettingsV4",
"lldpIpv6Address": "",
"name": "name2003202772-1470153927401"
},
"stackingHealth": None,
"stackingMode": None,
"interconnectBaySet": 3,
"snmpConfiguration": {
"description": None,
"readCommunity": "public",
"enabled": True,
"systemContact": "",
"snmpAccess": [],
"trapDestinations": [],
"type": "snmp-configuration",
"name": None
}
}]
Post_backup_eg = [{
"osDeploymentSettings": {
"deploymentModeSettings": {
"deploymentNetworkUri": None,
"deploymentMode": "None"
},
"manageOSDeployment": False
},
"powerMode": "RedundantPowerFeed",
"interconnectBayMappings": [
{
"logicalInterconnectGroupUri": "LIG:" + Post_backup_LIG_NAME,
"interconnectBay": 3
},
{
"logicalInterconnectGroupUri": "LIG:" + Post_backup_LIG_NAME,
"interconnectBay": 6
}
],
"enclosureCount": 1,
"ipAddressingMode": "DHCP",
"ipRangeUris": [],
"name": Post_backup_EG_NAME
}]
| [
"[email protected]"
] | |
3749352f2804e412ae11dd21cac151c55e754eeb | bda2cafa8a5f0adb702aa618ff18372428ad9f84 | /artie.py | 5565427bac48805feff8e2e480cc82942fb78ed2 | [] | no_license | rogerhoward/artie3000 | eb2d5968d9b2fc19cb8ca75836ea4d0911ba3f87 | ec2afc6341029b0279b58b917a0e473e7462c5c5 | refs/heads/master | 2022-02-12T03:29:36.614009 | 2019-10-23T15:26:50 | 2019-10-23T15:26:50 | 177,835,173 | 1 | 0 | null | 2022-02-04T15:11:34 | 2019-03-26T17:13:53 | HTML | UTF-8 | Python | false | false | 715 | py | #!/usr/bin/env python
import asyncio
import websockets
import random, string
import simplejson as json
def random_id(length=10):
    return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))
class Artie(object):
    def __init__(self):
pass
async def bot():
async with websockets.connect(
'ws://local.codewithartie.com:8899/websocket') as websocket:
# name = input("What's your name? ")
msg_id = random_id()
vers = {"cmd": "version", "id": msg_id}
await websocket.send(json.dumps(vers))
print(f"> {vers}")
greeting = await websocket.recv()
print(f"< {greeting}")
asyncio.get_event_loop().run_until_complete(bot())
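# Roughly, the exchange is: send {"cmd": "version", "id": "<10 random chars>"} over the
# robot's websocket bridge and print whatever JSON reply Artie answers with (the reply
# format is not pinned down here, so it is simply echoed).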
| [
"[email protected]"
] | |
473c4a2f4ee207e7a6e8c1b283db66b9a56da53c | e4d421593cfff40520c33559028754de3acf7be5 | /loki/redis/api.py | d05ad23060a924eaac33c823d5fbc24c55bc7034 | [] | no_license | dongzerun/wloki | 341d38982d950e526e7039336e4cb32f8cff5fca | e39819a5f5854795bf90f6a37c870f442ac16c60 | refs/heads/master | 2020-06-21T01:25:23.078936 | 2016-11-16T16:46:54 | 2016-11-16T16:46:54 | 74,812,144 | 1 | 1 | null | 2016-11-26T07:46:03 | 2016-11-26T07:46:03 | null | UTF-8 | Python | false | false | 17,574 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# [email protected]
import re
import json
import requests
from torext import params
from sqlalchemy import and_, or_
from .models import RedisInstance, CodisInstance
from ..db import get_redis
from ..base.handlers import APIHandler
from ..privilege import require_node_privileges
from .privileges import RedisPrivilege
def getDashboardAddr(name):
"""Get codis dashboard address using zk & product_name"""
tmp = name.split('-')
cluster, name = tmp[0], tmp[1]
select_obj = CodisInstance\
.query\
.filter(and_(CodisInstance.zk == tmp[0],
CodisInstance.name == tmp[1]))\
.order_by(CodisInstance.zk.asc(),
CodisInstance.name.asc())\
.limit(1)\
.all()
return cluster, name, select_obj[0].to_dict()['dashboard']
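# For example, a product name such as "zk1-cache" (illustrative values) is split into the
# zk cluster "zk1" and the codis product "cache", and the dashboard URL is read from the
# first matching CodisInstance row.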
def getRange(data):
"""Get range expression from sequence.
For example, [1,2,3,6,7,8] produce [[1,3],[6,8]]"""
if len(data) == 0:
return []
data = sorted(data)
ret = []
a_range = {'low': data[0], 'high': data[0]}
for val in data:
if a_range['high'] == val:
continue
elif a_range['high'] + 1 == val:
a_range['high'] = val
else:
ret.append([a_range['low'], a_range['high']])
a_range['low'] = val
a_range['high'] = val
ret.append([a_range['low'], a_range['high']])
return ret
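# A few more cases for reference: getRange([]) -> [], getRange([5]) -> [[5, 5]],
# and getRange([1, 2, 3, 6, 7, 8]) -> [[1, 3], [6, 8]] as in the docstring above.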
def check_path(func):
"""Decorator for checking whether the path is allowed by that method"""
def check_wrapper(self, name, path):
path = str(path)
if hasattr(self, 'allow') \
and func.__name__ in self.allow \
and not re.match('|'.join(self.allow[func.__name__]), path):
self.set_status(405)
self.write('Not allowed path "%s" for method "%s"'%(path, func.__name__))
else:
return func(self, name, path)
return check_wrapper
class ManageHandler(APIHandler):
"""ListRedisHandler list all codis list"""
allow = {
'get': ['^list$', '^search$'],
'post': ['^redis$', '^codis$']
}
@check_path
def get(self, _, path):
rlt = {}
if path == "list":
try:
select_obj = CodisInstance\
.query\
.order_by(\
CodisInstance.zk.asc(),\
CodisInstance.name.asc())\
.all()
except Exception as e:
self.set_status(500)
self.write(str(e))
else:
for o in select_obj:
ret = o.to_dict()
if not ret['zk'] in rlt:
rlt[ret['zk']] = []
rlt[ret['zk']].append(ret['name'])
self.write({'Codis': rlt})
elif path == "search":
try:
keyword = self.get_argument('s');
obj_codis = CodisInstance\
.query\
.filter(or_(CodisInstance.name.like("%{0}%".format(keyword)),
CodisInstance.zk.like("%{0}%".format(keyword)),
CodisInstance.dashboard.like("%{0}%".format(keyword)),
CodisInstance.proxy.like("%{0}%".format(keyword))
)\
)\
.all()
obj_redis = RedisInstance\
.query\
.filter(or_(RedisInstance.host.like("%{0}%".format(keyword)),
RedisInstance.port.like("%{0}%".format(keyword)),
RedisInstance.master_host.like("%{0}%".format(keyword)),
RedisInstance.master_port.like("%{0}%".format(keyword)),
RedisInstance.cluster.like("%{0}%".format(keyword))
)\
)\
.all()
except Exception as e:
self.set_status(500)
self.write(str(e))
else:
insensitive_keyword = re.compile(re.escape(keyword), re.IGNORECASE)
ret = {}
ret['codis'] = [o.to_dict() for o in obj_codis]
ret['redis'] = [o.to_dict() for o in obj_redis]
self.write(json.dumps(ret))
@require_node_privileges(RedisPrivilege.manage_redis, lambda c: 1)
@check_path
def post(self, _, path):
try:
try:
post_data = json.loads(self.request.body)
except:
raise ValueError('Invalid post data format')
if path == "redis":
p_host = str(post_data['host'])
p_port = int(post_data['port'])
p_status = str(post_data['status'])
RedisInstance.save_and_update(host=p_host, port=p_port, cluster=None, status=p_status, update=True)
elif path == "codis":
p_name = str(post_data['name'])
p_zk = str(post_data['zk'])
CodisInstance.save_and_update(name=p_name, zk=p_zk, update=True)
except Exception as e:
self.set_status(500)
self.write(str(e))
else:
self.write(json.dumps({'status': 'success'}))
class DefaultHandler(APIHandler):
"""MainHandler, used for apis not specified separately"""
allow = {
'get': ['^overview$']
}
@check_path
def get(self, name, path):
zk, _, base_url = getDashboardAddr(name)
url = base_url + "/api/" + path
try:
resp = requests.get(url)
except Exception as e:
self.set_status(500)
self.write(str(e))
return
# Simplefied api for 'overview'
if path == 'overview':
ret = {}
try:
data = json.loads(resp.text)
except Exception as e:
self.set_status(500)
self.write(str(e))
return
else:
ret['dashboard'] = base_url
ret['zk'] = zk
ret['product'] = data['product']
ret['ops'] = data['ops'] if data['ops'] >= 0 else 0
ret['keys'], ret['memory'] = (0, 0)
for redis_info in data['redis_infos']:
if redis_info is not None and 'db0' in redis_info:
ret['keys'] += int(redis_info['db0'].split(',')[0].split('=')[1])
ret['memory'] += int(redis_info['used_memory'])
self.write(json.dumps(ret))
else:
self.write(resp.text)
class DebugHandler(APIHandler):
"""DebugHandler, get codis debug info"""
def get(self, name, addr):
if not re.match(r'.+(\.nosajia\.com):\d+', str(addr)):
tmp = addr.split(":")
addr = tmp[0] + ".nosa.me:" + tmp[1]
url = "http://" + addr + "/debug/vars"
try:
resp = requests.get(url)
except Exception as e:
self.set_status(500)
self.write(str(e))
else:
self.write(resp.text)
class GroupHandler(APIHandler):
"""GroupHandler, get & post group info"""
allow = {
'get': ['^$'],
'post': ['^/addGroup$', '^/[0-9]+/addServer$', '^/[0-9]+/removeServer$', '^/[0-9]+/promote$'],
'delete': ['^/[0-9]+$']
}
@check_path
def get(self, name, path):
base_url = getDashboardAddr(name)[2]
url = base_url + "/api/server_groups" + path
try:
resp = requests.get(url)
except Exception as e:
self.set_status(500)
self.write(str(e))
return
ret = []
for group in json.loads(resp.text):
grp_data = {}
grp_data['id'] = group['id']
grp_data['product_name'] = group['product_name']
if group['servers'] is not None:
for server in group['servers']:
if server['type'] not in grp_data:
grp_data[server['type']] = []
url = base_url + "/api/redis/" + server['addr'] + "/stat"
serv_data = {}
try:
resp = requests.get(url)
resp_data = json.loads(resp.text)
except:
serv_data['serv_addr'] = server['addr']
else:
serv_data['serv_addr'] = server['addr']
serv_data['maxmemory'] = resp_data['maxmemory']
serv_data['used_memory'] = resp_data['used_memory']
if 'db0' in resp_data:
serv_data['db0'] = resp_data['db0']
grp_data[server['type']].append(serv_data)
ret.append(grp_data)
self.write(json.dumps(ret))
@require_node_privileges(RedisPrivilege.manage_redis, lambda c: 1)
@check_path
def post(self, name, path):
base_url = getDashboardAddr(name)[2]
if(path == "/addGroup"):
url = base_url + "/api/server_groups"
else:
url = base_url + "/api/server_group" + path
headers = {'Content-Type': 'application/text; charset=UTF-8'}
post_data = self.request.body
try:
if(re.match(r'^/[0-9]+/promote$', path)):
resp = requests.post(url, data=post_data, headers=headers)
else:
resp = requests.put(url, data=post_data, headers=headers)
except Exception as e:
self.set_status(500)
self.write(str(e))
else:
self.write(resp.text)
@require_node_privileges(RedisPrivilege.manage_redis, lambda c: 1)
@check_path
def delete(self, name, path):
base_url = getDashboardAddr(name)[2]
url = base_url + "/api/server_group" + path
headers = {'Content-Type': 'application/text; charset=UTF-8'}
try:
resp = requests.delete(url, headers=headers)
except Exception as e:
self.set_status(500)
self.write(str(e))
else:
self.write(resp.text)
class ProxyHandler(APIHandler):
"""ProxyHandler, get & post proxy info"""
def get(self, name):
base_url = getDashboardAddr(name)[2]
url = base_url + "/api/proxy/list"
try:
resp = requests.get(url)
except Exception as e:
self.set_status(500)
self.write(str(e))
return
else:
self.write(resp.text)
@require_node_privileges(RedisPrivilege.manage_redis, lambda c: 1)
def post(self, name):
base_url = getDashboardAddr(name)[2]
post_data = ''
url = base_url + "/api/proxy/list"
try:
resp = requests.get(url)
except Exception as e:
self.set_status(500)
self.write(str(e))
return
for proxy_data in json.loads(resp.text):
if proxy_data['id'] == self.get_argument('proxy_id'):
post_data = proxy_data
break
if post_data == '':
self.set_status(500)
self.write('Not valid proxy_id: %s ' % self.get_argument('proxy_id'))
return
# TODO mark_offline not test yet
if self.get_argument('state') == 'OFF':
post_data['state'] = 'mark_offline'
else:
post_data['state'] = 'online'
url = base_url + "/api/proxy"
post_data = json.dumps(post_data)
headers = {'Content-Type': 'application/json; charset=UTF-8'}
try:
resp = requests.post(url, data=post_data, headers=headers)
except Exception as e:
self.set_status(500)
self.write(str(e))
else:
self.write(resp.text)
class SlotHandler(APIHandler):
"""SlotHandler, get & post slot info"""
def get(self, name):
base_url = getDashboardAddr(name)[2]
url = base_url + "/api/slots"
try:
resp = requests.get(url)
except Exception as e:
self.set_status(500)
self.write(str(e))
return
slots = json.loads(resp.text)
ret_data = {}
for slot in slots:
if slot['group_id'] not in ret_data:
ret_data[slot['group_id']] = {
'group_id': slot['group_id'],
'sort_order': 1023,
'migrating': [],
'offline': [],
'online': []
}
# 'sort_order' is used for sorting. We wan group list are sorted in minimal slot order.
if(ret_data[slot['group_id']]['sort_order'] > slot['id']):
ret_data[slot['group_id']]['sort_order'] = slot['id']
# 'pre_migrate' status considered to be 'migrating' status.
if slot['state']['status'] == 'pre_migrate' \
or slot['state']['status'] == 'migrate':
ret_data[slot['group_id']]['migrating'].append(slot['id'])
else:
ret_data[slot['group_id']][slot['state']['status']].append(slot['id'])
for k, v in ret_data.iteritems():
v['migrating'] = getRange(v['migrating'])
v['offline'] = getRange(v['offline'])
v['online'] = getRange(v['online'])
ret_data = ret_data.values()
self.write(json.dumps(ret_data))
@require_node_privileges(RedisPrivilege.manage_redis, lambda c: 1)
def post(self, name):
base_url = getDashboardAddr(name)[2]
url = base_url + "/api/slot"
headers = {'Content-Type': 'application/text; charset=UTF-8'}
post_data = self.request.body
try:
resp = requests.post(url, data=post_data, headers=headers)
except Exception as e:
self.set_status(500)
self.write(str(e))
else:
self.write(resp.text)
class MigrateHandler(APIHandler):
"""MigrateHandler, migrate slot, show migrate status"""
allow = {
'get':['^/status$', '^/tasks$'],
'post':['^$']
}
@check_path
def get(self, name, path):
base_url = getDashboardAddr(name)[2]
url = base_url + "/api/migrate" + path
try:
resp = requests.get(url)
except Exception as e:
self.set_status(500)
self.write(str(e))
return
else:
if path == '/tasks':
# Produce compact data
tmp_data={}
task_data = json.loads(resp.text)
if task_data is None or len(task_data) == 0:
self.write(resp.text)
return
for task_slot in task_data:
tmp_key = '%s-%s-%s-%s-%s'%(task_slot['new_group'],
task_slot['delay'],
task_slot['create_at'],
task_slot['percent'],
task_slot['status'])
if tmp_key not in tmp_data:
tmp_data[tmp_key] = []
tmp_data[tmp_key].append(task_slot['slot_id'])
rlt = []
for k, v in tmp_data.iteritems():
tmp_arr = k.split('-')
if len(v) == 1:
slot_ids = v
else:
tmp_range = getRange(v)[0]
print v,tmp_range
slot_ids = '%s ~ %s'%(tmp_range[0], tmp_range[1])
rlt.append({
'slot_id': slot_ids,
'new_group': tmp_arr[0],
'delay': tmp_arr[1],
'create_at': tmp_arr[2],
'percent': tmp_arr[3],
'status': tmp_arr[4]
})
self.write(json.dumps(rlt))
else:
self.write(resp.text)
@require_node_privileges(RedisPrivilege.manage_redis, lambda c: 1)
@check_path
def post(self, name, path):
base_url = getDashboardAddr(name)[2]
url = base_url + "/api/migrate"
headers = {'Content-Type': 'application/text; charset=UTF-8'}
post_data = self.request.body
try:
resp = requests.post(url, data=post_data, headers=headers)
except Exception as e:
self.set_status(500)
self.write(str(e))
return
else:
self.write(resp.text)
handlers = [
('/(manage)/(.+)', ManageHandler),
('/([\w_-]+)/proxy', ProxyHandler),
('/([\w_-]+)/slots', SlotHandler),
('/([\w_-]+)/migrate(|/.+)', MigrateHandler),
('/([\w_-]+)/server_groups(|/.+)', GroupHandler),
('/([\w_-]+)/debug/(.+)', DebugHandler),
('/([\w_-]+)/(.+)', DefaultHandler),
]
| [
"[email protected]"
] | |
2c387465d98034a84512b41c9b90e62a486f5c44 | 05a9e0bb7e33099f94dfc8af53b4837bc5c9d287 | /python/small_impls/particle_energy_minimize/energy.py | 229e8a9b9ee860b91ef46a307ee5c45819fce34d | [] | no_license | HiroIshida/snippets | 999c09efadae80397cb82a424328bb1dbda4915f | f64dcd793184be64682b55bdaee7392fd97a0916 | refs/heads/master | 2023-09-01T08:18:42.523625 | 2023-09-01T04:08:20 | 2023-09-01T04:08:20 | 207,662,767 | 7 | 2 | null | 2022-08-01T23:20:42 | 2019-09-10T21:04:01 | C++ | UTF-8 | Python | false | false | 2,903 | py | import numpy as np
from scipy.optimize import OptimizeResult, minimize, Bounds
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import copy
import time
from typing import Callable, Tuple
def compute_distance_matrix(points: np.ndarray):
assert points.ndim == 2
n_point, n_dim = points.shape
squared_dist_matrix = np.zeros((n_point, n_point))
for i, p in enumerate(points):
squared_dist_matrix[:, i] = np.sum((p - points) ** 2, axis=1)
dist_matrix = np.sqrt(squared_dist_matrix)
return dist_matrix
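# Illustrative alternative (a sketch, not used by the optimisation below): the same
# pairwise Euclidean distance matrix can be computed without the explicit Python loop.
def compute_distance_matrix_vectorized(points: np.ndarray):
    from scipy.spatial.distance import cdist  # scipy is already a dependency of this script
    return cdist(points, points)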
def fun_energy(points: np.ndarray, n_power=2):
n_point, n_dim = points.shape
dist_matrix = compute_distance_matrix(points)
modified_dist_matrix = copy.deepcopy(dist_matrix)
for i in range(n_point):
modified_dist_matrix[i, i] = 1e5
    energy = 0.5 * np.sum(1.0 / (modified_dist_matrix ** n_power)) # 0.5 because each pair is counted twice
part_grad_list = []
for i, p in enumerate(points):
diff = points - p
r = modified_dist_matrix[:, i]
tmp = (1.0 / r **(n_power + 2))
part_grad = np.sum(n_power * np.tile(tmp, (n_dim, 1)).T * diff, axis=0)
part_grad_list.append(part_grad)
grad = np.hstack(part_grad_list)
return energy, grad
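# Note on fun_energy: with E = 0.5 * sum_{i != j} r_ij^(-p) (the diagonal is masked with a
# large dummy distance above), dE/dp_i = sum_{j != i} p * (p_j - p_i) / r_ij^(p + 2); the
# factor 0.5 cancels because each unordered pair appears twice in the double sum, which is
# exactly what the loop over points accumulates.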
def scipinize(fun: Callable) -> Tuple[Callable, Callable]:
closure_member = {"jac_cache": None}
def fun_scipinized(x):
f, jac = fun(x)
closure_member["jac_cache"] = jac
return f
def fun_scipinized_jac(x):
return closure_member["jac_cache"]
return fun_scipinized, fun_scipinized_jac
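# scipinize adapts a combined (value, gradient) function to the separate fun/jac callables
# expected by scipy.optimize.minimize: the gradient computed inside fun is cached in the
# closure and returned by jac, relying on scipy calling fun before jac at the same point.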
def gradient_test(func, x0, decimal=4):
f0, grad = func(x0)
n_dim = len(x0)
eps = 1e-7
grad_numerical = np.zeros(n_dim)
for idx in range(n_dim):
x1 = copy.copy(x0)
x1[idx] += eps
f1, _ = func(x1)
grad_numerical[idx] = (f1 - f0) / eps
print(grad_numerical)
print(grad)
np.testing.assert_almost_equal(grad, grad_numerical, decimal=decimal)
n_dim = 3
n_point = 27
points = np.random.rand(n_point, n_dim)
a = 1.5
def obj_fun(points: np.ndarray):
f1, grad1 = fun_energy(points, n_power=1)
f2, grad2 = fun_energy(points, n_power=-2)
#return a * f1 + b * f2, a * grad1 + b * grad2
return f1 + a * f2, grad1 + a * grad2
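# Optional sanity check (illustrative sketch): verify the analytic gradient of obj_fun
# against finite differences before optimising, e.g.
#   gradient_test(lambda x: obj_fun(x.reshape(-1, n_dim)), np.random.rand(n_point * n_dim))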
f, jac = scipinize(lambda x: obj_fun(x.reshape(-1, n_dim)))
x_init = points.flatten()
bounds = Bounds(lb = np.zeros(n_dim * n_point), ub = np.ones(n_dim * n_point))
slsqp_option = {
"maxiter": 1000
}
res = minimize(
f,
x_init,
method="SLSQP",
jac=jac,
bounds=bounds,
options=slsqp_option,
)
points_sol = res.x.reshape(-1, n_dim)
print(res)
if n_dim == 2:
plt.scatter(points_sol[:, 0], points_sol[:, 1])
plt.show()
else:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points_sol[:, 0], points_sol[:, 1], points_sol[:, 2])
plt.show()
| [
"[email protected]"
] | |
7b9ea24050935fa3ba59d49aa6dfd419a7ee5dac | a1615563bb9b124e16f4163f660d677f3224553c | /LI/lib/python3.8/site-packages/astropy/wcs/wcsapi/low_level_api.py | 418e38efe4dc325575c8638caa1cb045fccce3b9 | [
"MIT"
] | permissive | honeybhardwaj/Language_Identification | 2a247d98095bd56c1194a34a556ddfadf6f001e5 | 1b74f898be5402b0c1a13debf595736a3f57d7e7 | refs/heads/main | 2023-04-19T16:22:05.231818 | 2021-05-15T18:59:45 | 2021-05-15T18:59:45 | 351,470,447 | 5 | 4 | MIT | 2021-05-15T18:59:46 | 2021-03-25T14:42:26 | Python | UTF-8 | Python | false | false | 15,474 | py | import os
import abc
import numpy as np
__all__ = ['BaseLowLevelWCS', 'validate_physical_types']
class BaseLowLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the low-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def pixel_n_dim(self):
"""
The number of axes in the pixel coordinate system.
"""
@property
@abc.abstractmethod
def world_n_dim(self):
"""
The number of axes in the world coordinate system.
"""
@property
@abc.abstractmethod
def world_axis_physical_types(self):
"""
An iterable of strings describing the physical type for each world axis.
These should be names from the VO UCD1+ controlled Vocabulary
(http://www.ivoa.net/documents/latest/UCDlist.html). If no matching UCD
type exists, this can instead be ``"custom:xxx"``, where ``xxx`` is an
arbitrary string. Alternatively, if the physical type is
unknown/undefined, an element can be `None`.
"""
@property
@abc.abstractmethod
def world_axis_units(self):
"""
An iterable of strings given the units of the world coordinates for each
axis.
The strings should follow the `IVOA VOUnit standard
<http://ivoa.net/documents/VOUnits/>`_ (though as noted in the VOUnit
specification document, units that do not follow this standard are still
allowed, but just not recommended).
"""
@abc.abstractmethod
def pixel_to_world_values(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays as
input, and pixel coordinates should be zero-based. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays in units given by
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Note that pixel coordinates are
assumed to be 0 at the center of the first pixel in each dimension. If a
pixel is in a region where the WCS is not defined, NaN can be returned.
The coordinates should be specified in the ``(x, y)`` order, where for
an image, ``x`` is the horizontal coordinate and ``y`` is the vertical
coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
def array_index_to_world_values(self, *index_arrays):
"""
Convert array indices to world coordinates.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` except that
the indices should be given in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`).
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
return self.pixel_to_world_values(*index_arrays[::-1])
@abc.abstractmethod
def world_to_pixel_values(self, *world_arrays):
"""
Convert world coordinates to pixel coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays as
input in units given by `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays. Note that pixel
coordinates are assumed to be 0 at the center of the first pixel in each
dimension. If a world coordinate does not have a matching pixel
coordinate, NaN can be returned. The coordinates should be returned in
the ``(x, y)`` order, where for an image, ``x`` is the horizontal
coordinate and ``y`` is the vertical coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
def world_to_array_index_values(self, *world_arrays):
"""
Convert world coordinates to array indices.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` except that
the indices should be returned in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`). The indices should be
returned as rounded integers.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
pixel_arrays = self.world_to_pixel_values(*world_arrays)
if self.pixel_n_dim == 1:
pixel_arrays = (pixel_arrays,)
else:
pixel_arrays = pixel_arrays[::-1]
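        # np.floor(pixel + 0.5) rounds each pixel coordinate to the nearest integer
        # index (ties round towards +inf), giving the "rounded integers" promised in
        # the docstring above.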
array_indices = tuple(np.asarray(np.floor(pixel + 0.5), dtype=np.int_) for pixel in pixel_arrays)
return array_indices[0] if self.pixel_n_dim == 1 else array_indices
@property
@abc.abstractmethod
def world_axis_object_components(self):
"""
A list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` elements giving information
on constructing high-level objects for the world coordinates.
Each element of the list is a tuple with three items:
* The first is a name for the world object this world array
corresponds to, which *must* match the string names used in
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`. Note that names might
appear twice because two world arrays might correspond to a single
world object (e.g. a celestial coordinate might have both “ra” and
“dec” arrays, which correspond to a single sky coordinate object).
* The second element is either a string keyword argument name or a
positional index for the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`.
* The third argument is a string giving the name of the property
to access on the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes` in
order to get numerical values. Alternatively, this argument can be a
          callable Python object that takes a high-level coordinate object and
returns the numerical values suitable for passing to the low-level
WCS transformation methods.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
<https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
@property
@abc.abstractmethod
def world_axis_object_classes(self):
"""
A dictionary giving information on constructing high-level objects for
the world coordinates.
Each key of the dictionary is a string key from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components`, and each value is a
tuple with three elements or four elements:
* The first element of the tuple must be a class or a string specifying
the fully-qualified name of a class, which will specify the actual
Python object to be created.
        * The second element should be a tuple specifying the positional
arguments required to initialize the class. If
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components` specifies that the
          world coordinates should be passed as a positional argument, then this
tuple should include `None` placeholders for the world coordinates.
* The third tuple element must be a dictionary with the keyword
arguments required to initialize the class.
* Optionally, for advanced use cases, the fourth element (if present)
should be a callable Python object that gets called instead of the
class and gets passed the positional and keyword arguments. It should
return an object of the type of the first element in the tuple.
Note that we don't require the classes to be Astropy classes since there
is no guarantee that Astropy will have all the classes to represent all
kinds of world coordinates. Furthermore, we recommend that the output be
kept as human-readable as possible.
The classes used here should have the ability to do conversions by
passing an instance as the first argument to the same class with
different arguments (e.g. ``Time(Time(...), scale='tai')``). This is
a requirement for the implementation of the high-level interface.
The second and third tuple elements for each value of this dictionary
can in turn contain either instances of classes, or if necessary can
contain serialized versions that should take the same form as the main
classes described above (a tuple with three elements with the fully
qualified name of the class, then the positional arguments and the
keyword arguments). For low-level API objects implemented in Python, we
recommend simply returning the actual objects (not the serialized form)
for optimal performance. Implementations should either always or never
use serialized classes to represent Python objects, and should indicate
which of these they follow using the
`~astropy.wcs.wcsapi.BaseLowLevelWCS.serialized_classes` attribute.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
        <https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
# The following three properties have default fallback implementations, so
# they are not abstract.
@property
def array_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(row, column)``
order (the convention for arrays in Python).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
"""
if self.pixel_shape is None:
return None
else:
return self.pixel_shape[::-1]
@property
def pixel_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(x, y)``
order (where for an image, ``x`` is the horizontal coordinate and ``y``
is the vertical coordinate).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
If you are interested in getting a shape that is comparable to that of
a Numpy array, you should use
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` instead.
"""
return None
@property
def pixel_bounds(self):
"""
The bounds (in pixel coordinates) inside which the WCS is defined,
as a list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`
``(min, max)`` tuples.
The bounds should be given in ``[(xmin, xmax), (ymin, ymax)]``
order. WCS solutions are sometimes only guaranteed to be accurate
within a certain range of pixel values, for example when defining a
WCS that includes fitted distortions. This is an optional property,
and it should return `None` if a shape is not known or relevant.
"""
return None
@property
def pixel_axis_names(self):
"""
An iterable of strings describing the name for each pixel axis.
If an axis does not have a name, an empty string should be returned
(this is the default behavior for all axes if a subclass does not
override this property). Note that these names are just for display
purposes and are not standardized.
"""
return [''] * self.pixel_n_dim
@property
def world_axis_names(self):
"""
An iterable of strings describing the name for each world axis.
If an axis does not have a name, an empty string should be returned
(this is the default behavior for all axes if a subclass does not
override this property). Note that these names are just for display
purposes and are not standardized. For standardized axis types, see
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_physical_types`.
"""
return [''] * self.world_n_dim
@property
def axis_correlation_matrix(self):
"""
Returns an (`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`,
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`) matrix that
indicates using booleans whether a given world coordinate depends on a
given pixel coordinate.
This defaults to a matrix where all elements are `True` in the absence
of any further information. For completely independent axes, the
diagonal would be `True` and all other entries `False`.
"""
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
@property
def serialized_classes(self):
"""
Indicates whether Python objects are given in serialized form or as
actual Python objects.
"""
return False
def _as_mpl_axes(self):
"""
Compatibility hook for Matplotlib and WCSAxes. With this method, one can
do::
from astropy.wcs import WCS
import matplotlib.pyplot as plt
wcs = WCS('filename.fits')
fig = plt.figure()
ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=wcs)
...
and this will generate a plot with the correct WCS coordinates on the
axes.
"""
from astropy.visualization.wcsaxes import WCSAxes
return WCSAxes, {'wcs': self}
UCDS_FILE = os.path.join(os.path.dirname(__file__), 'data', 'ucds.txt')
with open(UCDS_FILE) as f:
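    # The [1:] slice below drops the first line of ucds.txt (assumed to be a header row).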
VALID_UCDS = set([x.strip() for x in f.read().splitlines()[1:]])
def validate_physical_types(physical_types):
"""
Validate a list of physical types against the UCD1+ standard
"""
for physical_type in physical_types:
if (physical_type is not None and
physical_type not in VALID_UCDS and
not physical_type.startswith('custom:')):
raise ValueError(f"Invalid physical type: {physical_type}")
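# Illustrative sketch (not part of the astropy API): a minimal concrete implementation of
# BaseLowLevelWCS with a single identity axis, showing which members a subclass has to
# provide. The class name and the 'world' key below are assumptions made for this example.
class _IdentityLowLevelWCS(BaseLowLevelWCS):
    @property
    def pixel_n_dim(self):
        return 1
    @property
    def world_n_dim(self):
        return 1
    @property
    def world_axis_physical_types(self):
        # No matching UCD1+ word exists, so a custom type is used.
        return ['custom:identity']
    @property
    def world_axis_units(self):
        return ['']
    def pixel_to_world_values(self, *pixel_arrays):
        # Identity transform: world coordinate == pixel coordinate.
        return pixel_arrays[0]
    def world_to_pixel_values(self, *world_arrays):
        return world_arrays[0]
    @property
    def world_axis_object_components(self):
        return [('world', 0, 'value')]
    @property
    def world_axis_object_classes(self):
        from astropy import units as u
        return {'world': (u.Quantity, (), {'unit': u.one})}
if __name__ == '__main__':
    # Example usage of the sketch above together with validate_physical_types.
    _wcs = _IdentityLowLevelWCS()
    print(_wcs.array_index_to_world_values(np.arange(5)))           # [0 1 2 3 4]
    print(_wcs.world_to_array_index_values(np.array([1.2, 3.7])))   # [1 4]
    validate_physical_types(_wcs.world_axis_physical_types)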
| [
"[email protected]"
] | |
64ad7132ad974153f6145736b5dcf757c148bb1b | 0d55bde6f4784f6dea9e8e6945d05bbf627e1e7d | /Packs/GSuiteAdmin/Integrations/GSuiteAdmin/GSuiteAdmin_test.py | 7e202d339ad3241b43c8c2daba464dd02f2c007f | [
"MIT"
] | permissive | crestdatasystems/content | d7040415431b5d06d1569433a49869afcb0292bd | 5f0f00840c39f028dca8377551bbd725d8ee8a2d | refs/heads/master | 2023-08-16T19:35:38.150912 | 2023-07-11T05:59:59 | 2023-07-11T05:59:59 | 280,669,011 | 2 | 1 | MIT | 2023-03-10T16:00:35 | 2020-07-18T14:06:44 | Python | UTF-8 | Python | false | false | 45,957 | py | import json
from unittest.mock import patch
import pytest
import demistomock as demisto
from GSuiteAdmin import MESSAGES, GSuiteClient, OUTPUT_PREFIX, HR_MESSAGES, Client
with open('test_data/service_account_json.txt') as f:
TEST_JSON = f.read()
MOCKER_HTTP_METHOD = 'GSuiteApiModule.GSuiteClient.http_request'
@pytest.fixture
def gsuite_client():
headers = {
'Content-Type': 'application/json'
}
return Client(GSuiteClient.safe_load_non_strict_json(TEST_JSON), verify=False, proxy=False, headers=headers)
def test_main(mocker):
"""
Scenario: Main should initialize gsuite_client class and called command respectively.
Given:
- params and args.
When:
- Initializing gsuite_client with the parameters provided and calling respective command.
Then:
- Ensure results is returned from command function.
"""
import GSuiteAdmin
params = {
'user_service_account_json': TEST_JSON,
'admin_email': '[email protected]'
}
mocker.patch.object(demisto, 'command', return_value='test-module')
mocker.patch.object(demisto, 'params', return_value=params)
mocker.patch.object(GSuiteAdmin, 'test_module', return_value='ok')
GSuiteAdmin.main()
assert GSuiteAdmin.test_module.called
@patch('GSuiteAdmin.return_error')
def test_main_failure(mock_return_error, capfd, mocker):
"""
Scenario: Main should handle error while initializing gsuite_client class and called command respectively.
Given:
- params and args.
When:
- Initializing gsuite_client with the parameters provided and calling respective command.
Then:
- Ensure exception is raised.
"""
import GSuiteAdmin
params = {
'user_service_account_json': TEST_JSON,
}
mocker.patch.object(GSuiteAdmin.demisto, 'params', return_value=params)
mocker.patch.object(GSuiteAdmin.demisto, 'command', return_value='test-module')
mocker.patch.object(GSuiteAdmin, 'test_module', side_effect=Exception)
with capfd.disabled():
GSuiteAdmin.main()
mock_return_error.assert_called_once_with('Error: ')
def test_test_function(mocker, gsuite_client):
"""
Scenario: Call to test-module should return 'ok' if API call succeeds.
Given:
- gsuite_client object
When:
- Calling test function.
Then:
- Ensure 'ok' should be return.
"""
from GSuiteAdmin import test_module, GSuiteClient
mocker.patch.object(GSuiteClient, 'set_authorized_http')
mocker.patch.object(GSuiteClient, 'http_request')
assert test_module(gsuite_client) == 'ok'
@patch(MOCKER_HTTP_METHOD)
def test_gsuite_mobile_update_command_success(mocker_http_request, gsuite_client):
"""
Scenario: Mobile update command successful execution.
Given:
- Working API integration and correct parameters
When:
- Calling command method gsuite_mobile_update_command.
Then:
- Ensure expected human readable output is being set.
:param gsuite_client: gsuite_client object fixture
:param mocker_http_request: mocker object for gsuite_client.http_request
:return: None
"""
mocker_http_request.return_value = {}
from GSuiteAdmin import mobile_update_command
response = mobile_update_command(gsuite_client, {'resource_id': 'RESOURCE_ID'})
assert response.readable_output == HR_MESSAGES['MOBILE_UPDATE_SUCCESS'].format('RESOURCE_ID')
@patch(MOCKER_HTTP_METHOD)
def test_gsuite_mobile_update_command_failure(mocker_http_request, gsuite_client):
"""
Scenario: Mobile update command execution failure.
Given:
- Non-working API integration or incorrect parameters
When:
- Calling command method gsuite_mobile_update_command.
Then:
- Ensure expected error output is being set.
:param gsuite_client: gsuite_client object fixture
:param mocker_http_request: mocker object for gsuite_client.http_request
:return: None
"""
mocker_http_request.side_effect = Exception('UPDATE_ERROR')
from GSuiteAdmin import mobile_update_command
with pytest.raises(Exception, match='UPDATE_ERROR'):
mobile_update_command(gsuite_client, {})
MOBILE_ACTION_ERROR_CASES = [
('Internal error encountered', MESSAGES.get('INVALID_RESOURCE_CUSTOMER_ID_ERROR', '')),
('Bad Request', MESSAGES.get('INVALID_RESOURCE_CUSTOMER_ID_ERROR', '')),
('Some other error', 'Some other error'),
]
@pytest.mark.parametrize('error_message, parsed_error_message', MOBILE_ACTION_ERROR_CASES)
def test_invalid_gsuite_mobile_update_command_command(mocker, gsuite_client, error_message, parsed_error_message):
"""
Given:
- A client, a resource id, and an action to execute on the mobile device.
When:
- Running the gsuite_mobile_update_command command, and receiving an error from the API.
Then:
- Validate that the ambiguous error message is mapped to a more human readable error message.
"""
from GSuiteAdmin import mobile_update_command
from CommonServerPython import DemistoException
mocker.patch(MOCKER_HTTP_METHOD,
side_effect=DemistoException(message=error_message))
with pytest.raises(DemistoException) as e:
mobile_update_command(client=gsuite_client,
args={'customer_id': 'customer_id', 'resource_id': 'wrong_resource_id',
'action': 'some_action'})
assert parsed_error_message in str(e)
@patch(MOCKER_HTTP_METHOD)
def test_gsuite_mobile_delete_command_success(mocker_http_request, gsuite_client):
"""
Scenario: Mobile delete command successful execution.
Given:
- Working API integration and correct parameters
When:
- Calling command method gsuite_mobile_delete_command.
Then:
- Ensure expected human readable output is being set.
:param gsuite_client: gsuite_client object fixture
:param mocker_http_request: mocker object for gsuite_client.http_request
:return: None
"""
mocker_http_request.return_value = {}
from GSuiteAdmin import mobile_delete_command
response = mobile_delete_command(gsuite_client, {'resource_id': 'DELETE_RESOURCE'})
assert response.readable_output == HR_MESSAGES['MOBILE_DELETE_SUCCESS'].format('DELETE_RESOURCE')
@patch(MOCKER_HTTP_METHOD)
def test_gsuite_mobile_delete_command_failure(mocker_http_request, gsuite_client):
"""
Scenario: Mobile delete command execution failure.
Given:
- Non-working API integration or incorrect parameters
When:
- Calling command method gsuite_mobile_delete_command.
Then:
- Ensure expected error output is being set.
:param gsuite_client: gsuite_client object fixture
:param mocker_http_request: mocker object for gsuite_client.http_request
:return: None
"""
mocker_http_request.side_effect = Exception('DELETE_ERROR')
from GSuiteAdmin import mobile_delete_command
with pytest.raises(Exception, match='DELETE_ERROR'):
mobile_delete_command(gsuite_client, {})
def test_user_create_command(gsuite_client, mocker):
"""
Scenario: gsuite-user-create should works if valid arguments are provided.
Given:
- Command args.
When:
- Calling gsuite-user-create command with the arguments provided.
Then:
- Ensure CommandResult entry should be as expected.
"""
from GSuiteAdmin import user_create_command
with open('test_data/user_create_args.json', 'r') as file:
args = json.load(file)
with open('test_data/user_create_response.json') as file:
api_response = json.load(file)
with open('test_data/user_create_entry_context.json') as file:
expected_entry_context = json.load(file)
mocker.patch('GSuiteAdmin.GSuiteClient.http_request', return_value=api_response)
command_result = user_create_command(gsuite_client, args)
assert command_result.readable_output == expected_entry_context['HumanReadable']
assert command_result.outputs == expected_entry_context['EntryContext']['GSuite.User(val.id == obj.id)']
assert command_result.raw_response == expected_entry_context['Contents']
assert command_result.outputs_key_field == ['id']
assert command_result.outputs_prefix == 'GSuite.User'
def test_user_get_command(gsuite_client, mocker):
"""
Scenario: gsuite-user-get should works if valid arguments are provided.
Given:
- Command args.
When:
- Calling gsuite-user-create command with the arguments provided.
Then:
- Ensure CommandResult entry should be as expected.
"""
from GSuiteAdmin import user_get_command
args = {'user': 'testuser'}
with open('test_data/user_create_response.json') as file:
api_response = json.load(file)
with open('test_data/user_get_entry_context.json') as file:
expected_entry_context = json.load(file)
mocker.patch('GSuiteAdmin.GSuiteClient.http_request', return_value=api_response)
command_result = user_get_command(gsuite_client, args)
assert command_result.readable_output == expected_entry_context['HumanReadable']
assert command_result.outputs == expected_entry_context['EntryContext']['GSuite.User(val.id == obj.id)']
assert command_result.raw_response == expected_entry_context['Contents']
assert command_result.outputs_key_field == ['id']
assert command_result.outputs_prefix == 'GSuite.User'
@patch('GSuiteAdmin.GSuiteClient.http_request')
def test_user_alias_add_command_success(mocker_http_request, gsuite_client):
"""
Scenario: For gsuite-user-alias-add command successful run.
Given:
- Command args.
When:
- Calling gsuite-user-alias-add command with the parameters provided.
Then:
- Ensure command's raw_response, outputs, readable_output, outputs_key_field, outputs_prefix should be as expected.
"""
from GSuiteAdmin import user_alias_add_command
with open('test_data/user_alias_add.json', encoding='utf-8') as data:
expected_res = json.load(data)
mocker_http_request.return_value = expected_res['Contents']
args = expected_res['args']
result = user_alias_add_command(gsuite_client, args)
assert result.raw_response == expected_res['Contents']
assert result.outputs == expected_res['Outputs']
assert result.readable_output == expected_res['HumanReadable']
assert result.outputs_key_field == ['id', 'alias']
assert result.outputs_prefix == OUTPUT_PREFIX['ADD_ALIAS']
@patch('GSuiteAdmin.GSuiteClient.http_request')
def test_user_alias_add_command_wrong_argument(mocker_http_request, gsuite_client):
"""
    Scenario: Wrong argument given to gsuite-user-alias-add command.
Given:
- Command args.
When:
- Calling gsuite-user-alias-add command with the parameters provided.
Then:
- Ensure command should raise Exception as expected.
"""
from GSuiteAdmin import user_alias_add_command
message = "message"
mocker_http_request.side_effect = Exception(message)
args = {'user_key': '[email protected]',
'alias': '[email protected]',
'admin_email': '[email protected]'}
with pytest.raises(Exception, match=message):
user_alias_add_command(gsuite_client, args)
@patch('GSuiteAdmin.GSuiteClient.http_request')
def test_group_create_command_success(mocker_http_request, gsuite_client):
"""
Scenario: For gsuite-group-create command success.
Given:
- Command args.
When:
- Calling gsuite-group-create command with the parameters provided.
Then:
- Ensure command's raw_response, outputs, readable_output, outputs_key_field, outputs_prefix should be as expected.
"""
from GSuiteAdmin import group_create_command
with open('test_data/group_create_test_data.json', encoding='utf-8') as data:
test_data = json.load(data)
response = test_data.get('response_data', {})
mocker_http_request.return_value = response
result = group_create_command(gsuite_client, test_data.get('args', {}))
assert result.raw_response == response
assert result.outputs == response
assert result.readable_output.startswith("### " + HR_MESSAGES['GROUP_CREATE_SUCCESS'].format(response['name']))
assert result.outputs_key_field == 'id'
assert result.outputs_prefix == OUTPUT_PREFIX['GROUP']
@patch('GSuiteAdmin.GSuiteClient.http_request')
def test_group_create_command_failure(mocker_http_request, gsuite_client):
"""
Scenario: For gsuite-group-create command failure.
Given:
- Command args and a non-working gsuite api integration.
When:
- Calling gsuite-group-create command with the parameters provided.
Then:
- Ensure command's error response is as expected.
"""
mocker_http_request.side_effect = ValueError("SOME_ERROR")
from GSuiteAdmin import group_create_command
with pytest.raises(Exception, match="SOME_ERROR"):
group_create_command(gsuite_client, {})
@patch('GSuiteAdmin.GSuiteClient.http_request')
def test_group_get_command(mocker_http_request, gsuite_client):
"""
Scenario: For gsuite-group-get command
Given:
- Command args.
When:
- Calling gsuite-group-get command with the parameters provided.
Then:
- Ensure command's raw_response, outputs, readable_output, outputs_key_field, outputs_prefix should be as expected.
"""
from GSuiteAdmin import group_get_command
with open('test_data/group_get_test_data.json', encoding='utf-8') as data:
test_data = json.load(data)
response = test_data.get('response_data', {})
mocker_http_request.return_value = response
result = group_get_command(gsuite_client, test_data.get('args', {}))
assert result.raw_response == response
assert result.outputs == response
assert result.readable_output.startswith("### " + HR_MESSAGES['GROUP_GET_SUCCESS'].format(response['name']))
assert result.outputs_key_field == 'id'
assert result.outputs_prefix == OUTPUT_PREFIX['GROUP']
def test_prepare_args_for_role_assignment_list():
"""
Scenario: Valid arguments given for gsuite-role-assignment-list command.
Given:
- Command args.
When:
- Calling prepare_args_for_role_assignment_list with command arguments.
Then:
- Ensure prepared arguments should be returned.
"""
from GSuiteAdmin import prepare_args_for_role_assignment_list
arguments = {
'page_token': 'page token',
'role_id': 'role id',
'user_key': 'user key',
'customer_id': 'my_customer',
'admin_email': '[email protected]',
'max_results': '5'
}
expected_arguments = {
'pageToken': 'page token',
'roleId': 'role id',
'userKey': 'user key',
'maxResults': 5
}
assert prepare_args_for_role_assignment_list(arguments) == expected_arguments
@pytest.mark.parametrize('args', [{'max_results': 'abc', 'customer_id': 'c1', 'admin_email': 'e1'},
{'max_results': '-1', 'customer_id': 'c2', 'admin_email': 'e2'}])
def test_prepare_args_for_role_assignment_list_invalid_max_results_argument(args):
"""
Scenario: Invalid max_results argument given for gsuite-role-assignment-list command.
Given:
- Command args.
When:
- Calling prepare_args_for_role_assignment_list with command arguments.
Then:
- Ensure ValueError will be raised with respective message.
"""
from GSuiteAdmin import prepare_args_for_role_assignment_list
with pytest.raises(ValueError, match=MESSAGES['INTEGER_ERROR'].format('max_results')):
prepare_args_for_role_assignment_list(args)
def test_role_assignment_list(gsuite_client, mocker):
"""
Scenario: gsuite-role-assignment-list command is called with valid arguments.
Given:
- Command args.
When:
- Calling role_assignment_list with command arguments.
Then:
- Ensure CommandResult should return data as expected.
"""
from GSuiteAdmin import role_assignment_list_command
arguments = {
'customer_id': 'cfdge',
'max_results': '1'
}
with open('test_data/role_assignment_list_response.json') as file:
api_response = json.load(file)
with open('test_data/role_assignment_list_entry_context.json') as file:
expected_entry_context = json.load(file)
mocker.patch(MOCKER_HTTP_METHOD, side_effect=[api_response, {}])
command_result = role_assignment_list_command(gsuite_client, arguments)
assert command_result.readable_output == expected_entry_context['HumanReadable']
assert command_result.outputs == expected_entry_context['EntryContext']
assert command_result.raw_response == expected_entry_context['Contents']
assert role_assignment_list_command(gsuite_client, {}).readable_output == HR_MESSAGES['NO_RECORDS'].format(
'role assignment details')
def test_role_assignment_create(gsuite_client, mocker):
"""
Scenario: gsuite-role-assignment-create command is called with valid arguments.
Given:
- Command args.
When:
- Calling role_assignment_list with command arguments.
Then:
- Ensure CommandResult should return data as expected.
"""
from GSuiteAdmin import role_assignment_create_command
arguments = {
'customer_id': 'customer id',
'scope_type': 'CUSTOMER',
'role_id': 'role1',
'assigned_to': '1234'
}
with open('test_data/role_assignment_create_response.json') as file:
api_response = json.load(file)
with open('test_data/role_assignment_create_entry_context.json') as file:
expected_entry_context = json.load(file)
mocker.patch(MOCKER_HTTP_METHOD, return_value=api_response)
command_result = role_assignment_create_command(gsuite_client, arguments)
assert command_result.readable_output == expected_entry_context['HumanReadable']
assert command_result.outputs == expected_entry_context['EntryContext'][
'GSuite.RoleAssignment(val.roleAssignmentId == obj.roleAssignmentId)']
assert command_result.raw_response == expected_entry_context['Contents']
assert command_result.outputs_key_field == 'roleAssignmentId'
assert command_result.outputs_prefix == 'GSuite.RoleAssignment'
@patch('GSuiteAdmin.GSuiteClient.http_request')
def test_role_create_command_success(mocker_http_request, gsuite_client):
"""
Scenario: For gsuite-role-create command success.
Given:
- Command args.
When:
- Calling gsuite-role-create command with the parameters provided.
Then:
- Ensure command's raw_response, outputs, readable_output, outputs_key_field, outputs_prefix should be as expected.
"""
from GSuiteAdmin import role_create_command
with open('test_data/role_create_test_data.json', encoding='utf-8') as data:
test_data = json.load(data)
response_data = test_data['response']
mocker_http_request.return_value = response_data
result = role_create_command(gsuite_client, test_data['args'])
assert result.raw_response == response_data
assert result.outputs == response_data
assert result.readable_output.startswith(
"### " + HR_MESSAGES['ROLE_CREATE_SUCCESS'])
assert result.outputs_key_field == 'roleId'
assert result.outputs_prefix == OUTPUT_PREFIX['ROLE']
@patch('GSuiteAdmin.GSuiteClient.http_request')
def test_role_create_command_failure(mocker_http_request, gsuite_client):
"""
Scenario: For gsuite-role-create command failure.
Given:
- Command args and a non-working gsuite api integration.
When:
- Calling gsuite-role-create command with the parameters provided.
Then:
- Ensure command's error response is as expected.
"""
mocker_http_request.side_effect = ValueError("SOME_ERROR")
from GSuiteAdmin import role_create_command
with pytest.raises(Exception, match="SOME_ERROR"):
role_create_command(gsuite_client, {'role_privileges': 'test:test'})
@patch(MOCKER_HTTP_METHOD)
def test_gsuite_token_revoke_command_success(mocker_http_request, gsuite_client):
"""
Scenario: Token revoke command successful execution.
Given:
- Working API integration and correct parameters
When:
- Calling command method gsuite_token_revoke_command.
Then:
- Ensure expected human readable output is being set.
"""
mocker_http_request.return_value = {}
from GSuiteAdmin import token_revoke_command
response = token_revoke_command(gsuite_client, {'client_id': 'CLIENT_ID'})
assert response.readable_output == HR_MESSAGES['TOKEN_REVOKE_SUCCESS'].format('CLIENT_ID')
@patch(MOCKER_HTTP_METHOD)
def test_gsuite_token_revoke_command_failure(mocker_http_request, gsuite_client):
"""
Scenario: Token revoke command failure.
Given:
- Non-working API integration or incorrect parameters
When:
- Calling command method gsuite_token_revoke_command.
Then:
- Ensure expected error output is being set.
"""
mocker_http_request.side_effect = ValueError('SOME_ERROR')
from GSuiteAdmin import token_revoke_command
with pytest.raises(Exception, match='SOME_ERROR'):
token_revoke_command(gsuite_client, {})
def test_datatransfer_list(gsuite_client, mocker):
"""
Scenario: gsuite-datatransfer-list command is called with valid arguments.
Given:
- Command args.
When:
- Calling datatransfer_list with command arguments.
Then:
- Ensure CommandResult should return data as expected.
"""
from GSuiteAdmin import datatransfer_list_command
with open('test_data/datatransfer_list_response.json') as file:
api_response = json.load(file)
with open('test_data/datatransfer_list_entry_context.json') as file:
expected_entry_context = json.load(file)
mocker.patch(MOCKER_HTTP_METHOD, side_effect=[api_response, {}])
command_result = datatransfer_list_command(gsuite_client, {})
assert command_result.readable_output == expected_entry_context['HumanReadable']
assert command_result.outputs == expected_entry_context['EntryContext']
assert command_result.raw_response == expected_entry_context['Contents']
assert datatransfer_list_command(gsuite_client, {}).readable_output == HR_MESSAGES['NO_RECORDS'].format(
'data transfer details')
def test_custom_user_schema_create(gsuite_client, mocker):
"""
Scenario: gsuite-custom-user-schema-create command is called with valid arguments.
Given:
- Command args.
When:
- Calling custom_user_schema_create with command arguments.
Then:
- Ensure CommandResult should return data as expected.
"""
from GSuiteAdmin import custom_user_schema_create_command
arguments = {
'customer_id': 'customer_id',
'schema_name': 'new121',
'schema_display_name': 'n2',
'field_raw_json': '{"fields": []}'
}
with open('test_data/custom_user_schema_response.json') as file:
api_response = json.load(file)
with open('test_data/custom_user_schema_create_entry_context.json') as file:
expected_entry_context = json.load(file)
mocker.patch(MOCKER_HTTP_METHOD, return_value=api_response)
command_result = custom_user_schema_create_command(gsuite_client, arguments)
assert command_result.readable_output == expected_entry_context['HumanReadable']
assert command_result.outputs == expected_entry_context['EntryContext'][
'GSuite.UserSchema(val.schemaId == obj.schemaId)']
assert command_result.raw_response == expected_entry_context['Contents']
assert command_result.outputs_key_field == 'schemaId'
assert command_result.outputs_prefix == 'GSuite.UserSchema'
def test_prepare_args_for_custom_user_schema_create_required_argument_error():
"""
Scenario: Required argument(s) are not provided.
Given:
- Command args.
When:
- Calling prepare_args_for_custom_user_schema_create with command arguments.
Then:
- Ensure ValueError should be raised with respective message.
"""
from GSuiteAdmin import prepare_args_for_custom_user_schema
with pytest.raises(ValueError, match=MESSAGES['REQUIRED_ARGS_CUSTOM_SCHEMA']):
prepare_args_for_custom_user_schema({})
def test_custom_user_schema_update(gsuite_client, mocker):
"""
Scenario: gsuite-custom-user-schema-update command is called with valid arguments.
Given:
- Command args.
When:
- Calling custom_user_schema_update with command arguments.
Then:
- Ensure CommandResult should return data as expected.
"""
from GSuiteAdmin import custom_user_schema_update_command
arguments = {
'customer_id': 'customer_id',
'schema_name': 'new1',
'schema_display_name': 'n1',
'field_raw_json': '{"fields": []}'
}
with open('test_data/custom_user_schema_response.json') as file:
api_response = json.load(file)
with open('test_data/custom_user_schema_update_entry_context.json') as file:
expected_entry_context = json.load(file)
mocker.patch(MOCKER_HTTP_METHOD, return_value=api_response)
command_result = custom_user_schema_update_command(gsuite_client, arguments)
assert command_result.readable_output == expected_entry_context['HumanReadable']
assert command_result.outputs == expected_entry_context['EntryContext'][
'GSuite.UserSchema(val.schemaId == obj.schemaId)']
assert command_result.raw_response == expected_entry_context['Contents']
assert command_result.outputs_key_field == 'schemaId'
assert command_result.outputs_prefix == 'GSuite.UserSchema'
def test_custom_user_schema_update_required_args_error(gsuite_client):
"""
Scenario: gsuite-custom-user-schema-update command is called with no required arguments.
Given:
- Command args.
When:
- Calling custom_user_schema_update with command arguments.
Then:
- Ensure CommandResult should return data as expected.
"""
from GSuiteAdmin import custom_user_schema_update_command
with pytest.raises(ValueError, match=MESSAGES['CUSTOM_SCHEMA_UPDATE_REQUIRED_ARGS']):
custom_user_schema_update_command(gsuite_client, {})
with pytest.raises(ValueError, match=MESSAGES['REQUIRED_ARGS_CUSTOM_SCHEMA']):
custom_user_schema_update_command(gsuite_client, {'schema_name': 'new_schema'})
@patch(MOCKER_HTTP_METHOD)
def test_datatransfer_request_create_command_success(mocker_http_request, gsuite_client):
"""
Scenario: For datatransfer_request_create command success.
Given:
- Command args.
When:
- Calling datatransfer_request_create command with the parameters provided.
Then:
- Ensure command's raw_response, outputs, readable_output, outputs_prefix should be as expected.
"""
from GSuiteAdmin import datatransfer_request_create_command
with open('test_data/data_transfer_request_create_test_data.json') as data:
test_data = json.load(data)
response_data = test_data['output']
mocker_http_request.return_value = response_data
result = datatransfer_request_create_command(gsuite_client, test_data['args'])
assert result.raw_response == response_data
assert result.outputs == response_data
assert result.readable_output.startswith(
"### " + HR_MESSAGES['DATATRANSFER_REQUEST_CREATE_SUCCESS'])
assert result.outputs_prefix == OUTPUT_PREFIX['DATA_TRANSFER_REQUEST_CREATE']
def test_get_transfer_params_list_from_str_invalid_param_format():
"""
Scenario: get_transfer_params_list_from_str invalid params provided.
Given:
- incorrect command arguments
When:
- Calling command method get_transfer_params_list_from_str.
Then:
- Ensure expected error output is being set.
"""
from GSuiteAdmin import get_transfer_params_list_from_str
with pytest.raises(ValueError, match=MESSAGES['DATATRANSFER_TRANSFER_PARAM_FORMAT_ERROR']):
get_transfer_params_list_from_str('abc')
def test_datatransfer_request_create_command_validation_failure(gsuite_client):
"""
Scenario: datatransfer_request_create command validation logic failure.
Given:
- incorrect command arguments
When:
- Calling command method datatransfer_request_create_command.
Then:
- Ensure expected error output is being set.
"""
from GSuiteAdmin import datatransfer_request_create_command
with pytest.raises(Exception, match=MESSAGES['DATATRANSFER_MISSING_ARGUMENT'].format('\'old_owner_id\'')):
datatransfer_request_create_command(gsuite_client, {})
def test_prepare_datatransfer_payload_from_arguments():
"""
Scenario: For prepare_datatransfer_payload_from_arguments testing.
Given:
- datatransfer_request_create command arguments.
When:
- Calling prepare_datatransfer_payload_from_arguments method
Then:
- Ensure method returns valid request_payload
"""
with open('test_data/data_transfer_request_create_test_data.json', encoding='utf-8') as data:
test_data = json.load(data)
args = test_data['args']
output = test_data['request_payload']
from GSuiteAdmin import prepare_datatransfer_payload_from_arguments
assert prepare_datatransfer_payload_from_arguments(args) == output
@patch(MOCKER_HTTP_METHOD)
def test_user_delete_command(gsuite_client):
"""
Scenario: user delete command successful execution.
Given:
- Working API integration and correct parameters
When:
- Calling command method user_delete_command.
Then:
- Ensure expected human readable output is being set.
"""
from GSuiteAdmin import user_delete_command
response = user_delete_command(gsuite_client, {'user_key': 'user1'})
assert response.readable_output == HR_MESSAGES['USER_DELETE'].format('user1')
def test_user_update_command(gsuite_client, mocker):
"""
    Scenario: gsuite-user-update should work if valid arguments are provided.
Given:
- Command args.
When:
- Calling gsuite-user-update command with the arguments provided.
Then:
- Ensure CommandResult entry should be as expected.
"""
from GSuiteAdmin import user_update_command
with open('test_data/user_create_args.json', 'r') as file:
args = json.load(file)
args['archived'] = 'true'
args['org_unit_path'] = '\\'
with open('test_data/user_update_response.json') as file:
api_response = json.load(file)
with open('test_data/user_update_entry_context.json') as file:
expected_entry_context = json.load(file)
mocker.patch('GSuiteAdmin.GSuiteClient.http_request', return_value=api_response)
command_result = user_update_command(gsuite_client, args)
assert command_result.readable_output == expected_entry_context['HumanReadable']
assert command_result.outputs == expected_entry_context['EntryContext']['GSuite.User(val.id == obj.id)']
assert command_result.raw_response == expected_entry_context['Contents']
assert command_result.outputs_key_field == ['id']
assert command_result.outputs_prefix == 'GSuite.User'
# New Unit Tests
def util_load_json(path):
with open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
class MockResponse:
""" This class will be used to mock a request response (only the json function in the requests.Response class) """
def __init__(self, json_data):
self.json_data = json_data
def json(self):
return self.json_data
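# Illustrative usage (an assumption, not taken from an existing test): a test that needs a
# requests-like response object can wrap a parsed JSON fixture, e.g.
#   mocker.patch('requests.get', return_value=MockResponse(util_load_json('test_data/some_fixture.json')))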
CHROMEOS_ACTION_ERROR_CASES = [
('Delinquent account', MESSAGES.get('INVALID_RESOURCE_CUSTOMER_ID_ERROR', '')),
('Some other error', 'Some other error'),
]
@pytest.mark.parametrize('error_message, parsed_error_message', CHROMEOS_ACTION_ERROR_CASES)
def test_invalid_chromeos_action_command(mocker, gsuite_client, error_message, parsed_error_message):
"""
Given:
- A client, a resource id, and an action to execute on the chromeOS device.
When:
    - Running the gsuite_chromeos_device_action_command command, and receiving an error from the API.
Then:
- Validate that the ambiguous error message is mapped to a more human readable error message.
"""
from GSuiteAdmin import gsuite_chromeos_device_action_command
from CommonServerPython import DemistoException
mocker.patch(MOCKER_HTTP_METHOD,
side_effect=DemistoException(message=error_message))
with pytest.raises(DemistoException) as e:
gsuite_chromeos_device_action_command(client=gsuite_client,
args={'customer_id': 'customer_id', 'resource_id': 'wrong_resource_id',
'action': 'some_action'})
assert parsed_error_message in str(e)
TEST_DATA_INVALID_PAGINATION_ARGUMENTS = [
({'page_size': '3', 'page_token': 'some_token', 'limit': '25'}, ('please supply either the argument limit,'
' or the argument page_token, or the arguments'
' page_token and page_size together')),
({'limit': '0'}, 'The limit argument can\'t be negative or equal to zero'),
({'limit': '-78'}, 'The limit argument can\'t be negative or equal to zero'),
({'page_token': 'some_token', 'page_size': '101'}, 'The maximum page size is')
]
@pytest.mark.parametrize('args, error_message', TEST_DATA_INVALID_PAGINATION_ARGUMENTS)
def test_invalid_pagination_arguments(args, error_message):
"""
Given:
- The pagination arguments supplied by the user.
When:
- Running the function prepare_pagination_arguments to check the content of the pagination arguments.
Then:
- Validate that an exception is thrown in response to invalid pagination arguments.
"""
from GSuiteAdmin import prepare_pagination_arguments
from CommonServerPython import DemistoException, arg_to_number
with pytest.raises(DemistoException) as e:
prepare_pagination_arguments(page_size=arg_to_number(args.get('page_size', '')),
page_token=args.get('page_toke', ''),
limit=arg_to_number(args.get('limit', '')))
assert error_message in str(e)
def test_chromeos_device_action(mocker, gsuite_client):
"""
Given:
    - A client, a resource id (that identifies a ChromeOS device), and an action that affects the ChromeOS device
When:
- The command google-chromeosdevice-action is run with a correct action argument
Then:
- A CommandResults is returned that marks the command as successful
"""
from GSuiteAdmin import gsuite_chromeos_device_action_command
from CommonServerPython import CommandResults
expected_command_result = CommandResults(
readable_output=HR_MESSAGES.get('CHROMEOS_DEVICE_ACTION_SUCCESS', '').format('resource_id'),
)
mocker.patch(MOCKER_HTTP_METHOD, return_value={})
command_result = gsuite_chromeos_device_action_command(client=gsuite_client,
args={'customer_id': 'customer_id', 'resource_id': 'resource_id',
'action': 'correct_action'})
assert command_result.to_context() == expected_command_result.to_context()
TEST_DATA_AUTO_PAGINATION_FILES_CASES = [
('test_data/mobile_devices_list/automatic_pagination/raw_results_3_pages.json',
'test_data/mobile_devices_list/automatic_pagination/parsed_results_3_pages.json', {'limit': 7}),
('test_data/mobile_devices_list/automatic_pagination/raw_results_2_pages.json',
'test_data/mobile_devices_list/automatic_pagination/parsed_results_2_pages.json', {'limit': 6})
]
@pytest.mark.parametrize('raw_results_file, parsed_results_file, pagination_args', TEST_DATA_AUTO_PAGINATION_FILES_CASES)
def test_mobile_device_list_automatic_pagination_result_instance(mocker, gsuite_client, raw_results_file, parsed_results_file,
pagination_args):
# Since there is not enough mobile devices to actually do pagination, all the requests being mocked
# are under the impression that the maximum page is of size 3, this will give us the ability to mock the pagination process
"""
Given:
- Raw responses representing mobile devices and a limit argument.
When:
- Running the command device_list_automatic_pagination to parse the raw results and return an instance of
PaginationResult that hold the relevant data using automatic pagination.
Then:
- Validate the content of the PaginationResult instance.
"""
from GSuiteAdmin import MobileDevicesConfig, device_list_automatic_pagination, mobile_device_list_request
query_params = {'projection': 'full', 'order_by': 'name', 'sort_order': 'descending', **pagination_args}
raw_responses = util_load_json(raw_results_file)
expected_pagination_result_instance = create_pagination_result_automatic_instance(
raw_responses=raw_responses,
response_devices_list_key=MobileDevicesConfig.response_devices_list_key)
mocker.patch(MOCKER_HTTP_METHOD, side_effect=raw_responses)
pagination_result = device_list_automatic_pagination(request_by_device_type=mobile_device_list_request,
client=gsuite_client,
customer_id='customer_id',
query_params=query_params,
response_devices_list_key=MobileDevicesConfig.response_devices_list_key,
**pagination_args)
assert pagination_result == expected_pagination_result_instance
@pytest.mark.parametrize('raw_results_file, parsed_results_file, pagination_args', TEST_DATA_AUTO_PAGINATION_FILES_CASES)
def test_mobile_device_list_automatic_pagination(mocker, gsuite_client, raw_results_file, parsed_results_file, pagination_args):
    # Since there are not enough mobile devices to actually do pagination, all the requests being mocked
    # assume that the maximum page size is 3; this lets us mock the pagination process.
"""
Given:
- A client and query parameters for the API.
When:
    - Running the command gsuite_mobile_device_list_command to retrieve the mobile devices' list using automatic pagination.
Then:
- Validate the content of the context data and human readable.
"""
from GSuiteAdmin import gsuite_mobile_device_list_command
args = {'projection': 'full', 'order_by': 'name', 'sort_order': 'descending', **pagination_args, 'customer_id': 'customer_id'}
raw_responses = util_load_json(raw_results_file)
expected_command_results = util_load_json(parsed_results_file)
mocker.patch(MOCKER_HTTP_METHOD, side_effect=raw_responses)
command_results = gsuite_mobile_device_list_command(client=gsuite_client, args=args)
to_context = command_results.to_context()
assert to_context.get('HumanReadable') == expected_command_results.get('HumanReadable')
assert to_context.get('EntryContext') == expected_command_results.get('EntryContext')
TEST_DATA_MANUAL_PAGINATION_FILES_CASES = [
('test_data/mobile_devices_list/manual_pagination/raw_results_with_next_page_token.json',
'test_data/mobile_devices_list/manual_pagination/parsed_results_with_next_page_token.json',
{'page_token': 'dummy_next_page_token', 'page_size': 2}),
]
@pytest.mark.parametrize('raw_results_file, parsed_results_file, pagination_args', TEST_DATA_MANUAL_PAGINATION_FILES_CASES)
def test_mobile_device_list_manual_pagination_result_instance(mocker, gsuite_client, raw_results_file, parsed_results_file,
pagination_args):
    # Since there are not enough mobile devices to actually do pagination, all the requests being mocked
    # assume that the maximum page size is 3; this lets us mock the pagination process.
"""
Given:
- Raw responses representing mobile devices, and page_token and page_size arguments.
When:
    - Running the function device_list_manual_pagination to parse the raw results and return an instance of
    PaginationResult that holds the relevant data using manual pagination.
Then:
- Validate the content of the PaginationResult instance.
"""
from GSuiteAdmin import MobileDevicesConfig, device_list_manual_pagination, mobile_device_list_request
query_params = {'projection': 'full', 'order_by': 'name', 'sort_order': 'descending', **pagination_args}
raw_responses = util_load_json(raw_results_file)
expected_pagination_result_instance = create_pagination_result_manual_instance(
raw_responses=raw_responses,
response_devices_list_key=MobileDevicesConfig.response_devices_list_key)
mocker.patch(MOCKER_HTTP_METHOD, side_effect=raw_responses)
pagination_result = device_list_manual_pagination(request_by_device_type=mobile_device_list_request,
client=gsuite_client,
customer_id='customer_id',
query_params=query_params,
response_devices_list_key=MobileDevicesConfig.response_devices_list_key,
**pagination_args)
assert pagination_result == expected_pagination_result_instance
@pytest.mark.parametrize('raw_results_file, parsed_results_file, pagination_args', TEST_DATA_MANUAL_PAGINATION_FILES_CASES)
def test_mobile_device_list_manual_pagination(mocker, gsuite_client, raw_results_file, parsed_results_file, pagination_args):
    # Since there are not enough mobile devices to actually do pagination, all the requests being mocked
    # assume that the maximum page size is 3; this lets us mock the pagination process.
"""
Given:
- A client and query parameters for the API.
When:
    - Running the command gsuite_mobile_device_list_command to retrieve the mobile devices' list using manual pagination.
Then:
- Validate the content of the context data and human readable.
"""
from GSuiteAdmin import gsuite_mobile_device_list_command
args = {'projection': 'full', 'order_by': 'name', 'sort_order': 'descending', **pagination_args, 'customer_id': 'customer_id'}
raw_responses = util_load_json(raw_results_file)
expected_command_results = util_load_json(parsed_results_file)
mocker.patch(MOCKER_HTTP_METHOD, side_effect=raw_responses)
command_results = gsuite_mobile_device_list_command(client=gsuite_client, args=args)
to_context = command_results.to_context()
assert to_context.get('HumanReadable') == expected_command_results.get('HumanReadable')
assert to_context.get('EntryContext') == expected_command_results.get('EntryContext')
TEST_PAGINATION_ARGS_CASES = [
({'limit': '2'}),
({'page_size': '3'})
]
@pytest.mark.parametrize('pagination_args', TEST_PAGINATION_ARGS_CASES)
def test_mobile_device_list_empty_response(mocker, gsuite_client, pagination_args):
"""
Given:
- A client and query parameters for the API.
When:
    - Running the command gsuite_mobile_device_list_command to retrieve the mobile devices' list and receiving no results.
Then:
- Validate the content of the context data and human readable.
"""
from GSuiteAdmin import gsuite_mobile_device_list_command
args = {'projection': 'full', 'order_by': 'name', 'sort_order': 'descending', **pagination_args, 'customer_id': 'customer_id'}
raw_responses = util_load_json('test_data/mobile_devices_list/no_results_found.json')
expected_command_results = util_load_json('test_data/mobile_devices_list/parsed_no_results_found.json')
mocker.patch(MOCKER_HTTP_METHOD, side_effect=raw_responses)
command_results = gsuite_mobile_device_list_command(client=gsuite_client, args=args)
to_context = command_results.to_context()
assert to_context.get('HumanReadable') == expected_command_results.get('HumanReadable')
assert to_context.get('EntryContext') == expected_command_results.get('EntryContext')
def create_pagination_result_automatic_instance(raw_responses: list[dict], response_devices_list_key: str) -> dict:
"""
This will create a PaginationResult instance that reflect automatic pagination in order to check the return values of
functions that return PaginationResult.
"""
mocked_data = []
for raw_response in raw_responses:
mocked_data.extend(raw_response.get(response_devices_list_key, []))
return {'data': mocked_data, 'raw_response': raw_responses}
def create_pagination_result_manual_instance(raw_responses: list[dict], response_devices_list_key: str) -> dict:
"""
    This will create a PaginationResult instance that reflects manual pagination in order to check the return values of
functions that return PaginationResult.
"""
assert len(raw_responses) <= 1, 'The length of the mocked raw responses of a manual pagination should be at most 1.'
mocked_data = []
mocked_next_page_token = ''
for raw_response in raw_responses:
mocked_data.extend(raw_response.get(response_devices_list_key, []))
mocked_next_page_token = raw_response.get('nextPageToken', '')
return {'data': mocked_data, 'raw_response': raw_responses, 'next_page_token': mocked_next_page_token}
| [
"[email protected]"
] | |
07619507d6a85885836511e0715e4e168a19bafe | fd67592b2338105e0cd0b3503552d188b814ad95 | /test/test_models/test_unremovable_entry.py | b845f33b47466443e973f4d1d51dc0fcccaee15e | [] | no_license | E-goi/sdk-python | 175575fcd50bd5ad426b33c78bdeb08d979485b7 | 5cba50a46e1d288b5038d18be12af119211e5b9f | refs/heads/master | 2023-04-29T20:36:02.314712 | 2023-04-18T07:42:46 | 2023-04-18T07:42:46 | 232,095,340 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,051 | py | # coding: utf-8
"""
APIv3 (New)
# Introduction This is our new version of API. We invite you to start using it and give us your feedback # Getting Started E-goi can be integrated with many environments and programming languages via our REST API. We've created a developer focused portal to give your organization a clear and quick overview of how to integrate with E-goi. The developer portal focuses on scenarios for integration and flow of events. We recommend familiarizing yourself with all of the content in the developer portal, before start using our rest API. The E-goi APIv3 is served over HTTPS. To ensure data privacy, unencrypted HTTP is not supported. Request data is passed to the API by POSTing JSON objects to the API endpoints with the appropriate parameters. BaseURL = api.egoiapp.com # RESTful Services This API supports 5 HTTP methods: * <b>GET</b>: The HTTP GET method is used to **read** (or retrieve) a representation of a resource. * <b>POST</b>: The POST verb is most-often utilized to **create** new resources. * <b>PATCH</b>: PATCH is used for **modify** capabilities. The PATCH request only needs to contain the changes to the resource, not the complete resource * <b>PUT</b>: PUT is most-often utilized for **update** capabilities, PUT-ing to a known resource URI with the request body containing the newly-updated representation of the original resource. * <b>DELETE</b>: DELETE is pretty easy to understand. It is used to **delete** a resource identified by a URI. # Authentication We use a custom authentication method, you will need a apikey that you can find in your account settings. Below you will see a curl example to get your account information: #!/bin/bash curl -X GET 'https://api.egoiapp.com/my-account' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' Here you can see a curl Post example with authentication: #!/bin/bash curl -X POST 'http://api.egoiapp.com/tags' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' \\ -H 'Content-Type: application/json' \\ -d '{`name`:`Your custom tag`,`color`:`#FFFFFF`}' # SDK Get started quickly with E-goi with our integration tools. Our SDK is a modern open source library that makes it easy to integrate your application with E-goi services. * <a href='https://github.com/E-goi/sdk-java'>Java</a> * <a href='https://github.com/E-goi/sdk-php'>PHP</a> * <a href='https://github.com/E-goi/sdk-python'>Python</a> * <a href='https://github.com/E-goi/sdk-ruby'>Ruby</a> * <a href='https://github.com/E-goi/sdk-javascript'>Javascript</a> * <a href='https://github.com/E-goi/sdk-csharp'>C#</a> # Stream Limits Stream limits are security mesures we have to make sure our API have a fair use policy, for this reason, any request that creates or modifies data (**POST**, **PATCH** and **PUT**) is limited to a maximum of **20MB** of content length. If you arrive to this limit in one of your request, you'll receive a HTTP code **413 (Request Entity Too Large)** and the request will be ignored. To avoid this error in importation's requests, it's advised the request's division in batches that have each one less than 20MB. # Timeouts Timeouts set a maximum waiting time on a request's response. Our API, sets a default timeout for each request and when breached, you'll receive an HTTP **408 (Request Timeout)** error code. You should take into consideration that response times can vary widely based on the complexity of the request, amount of data being analyzed, and the load on the system and workspace at the time of the query. 
When dealing with such errors, you should first attempt to reduce the complexity and amount of data under analysis, and only then, if problems are still occurring ask for support. For all these reasons, the default timeout for each request is **10 Seconds** and any request that creates or modifies data (**POST**, **PATCH** and **PUT**) will have a timeout of **60 Seconds**. Specific timeouts may exist for specific requests, these can be found in the request's documentation. # Callbacks A callback is an asynchronous API request that originates from the API server and is sent to the client in response to a previous request sent by that client. The API will make a **POST** request to the address defined in the URL with the information regarding the event of interest and share data related to that event. <a href='/usecases/callbacks/' target='_blank'>[Go to callbacks documentation]</a> ***Note:*** Only http or https protocols are supported in the Url parameter. <security-definitions/> # noqa: E501
The version of the OpenAPI document: 3.0.0
Generated by: https://openapi-generator.tech
"""
import unittest
import egoi_api
from egoi_api.model.unremovable_entry import UnremovableEntry
from egoi_api import configuration
class TestUnremovableEntry(unittest.TestCase):
"""UnremovableEntry unit test stubs"""
_configuration = configuration.Configuration()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
168587159267fcc2bcfa812c45b2ad9b99b148ba | 78c3082e9082b5b50435805723ae00a58ca88e30 | /03.AI알고리즘 소스코드/venv/Lib/site-packages/caffe2/python/parallel_workers_test.py | b1822ec259538d2302294aeadf78f99766856b06 | [] | no_license | jinStar-kimmy/algorithm | 26c1bc456d5319578110f3d56f8bd19122356603 | 59ae8afd8d133f59a6b8d8cee76790fd9dfe1ff7 | refs/heads/master | 2023-08-28T13:16:45.690232 | 2021-10-20T08:23:46 | 2021-10-20T08:23:46 | 419,217,105 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,620 | py |
import unittest
from caffe2.python import workspace, core
import caffe2.python.parallel_workers as parallel_workers
def create_queue():
queue = 'queue'
workspace.RunOperatorOnce(
core.CreateOperator(
"CreateBlobsQueue", [], [queue], num_blobs=1, capacity=1000
)
)
# Technically, blob creations aren't thread safe. Since the unittest below
# does RunOperatorOnce instead of CreateNet+RunNet, we have to precreate
# all blobs beforehand
for i in range(100):
workspace.C.Workspace.current.create_blob("blob_" + str(i))
workspace.C.Workspace.current.create_blob("status_blob_" + str(i))
workspace.C.Workspace.current.create_blob("dequeue_blob")
workspace.C.Workspace.current.create_blob("status_blob")
return queue
def create_worker(queue, get_blob_data):
def dummy_worker(worker_id):
blob = 'blob_' + str(worker_id)
workspace.FeedBlob(blob, get_blob_data(worker_id))
workspace.RunOperatorOnce(
core.CreateOperator(
'SafeEnqueueBlobs', [queue, blob], [blob, 'status_blob_' + str(worker_id)]
)
)
return dummy_worker
def dequeue_value(queue):
dequeue_blob = 'dequeue_blob'
workspace.RunOperatorOnce(
core.CreateOperator(
"SafeDequeueBlobs", [queue], [dequeue_blob, 'status_blob']
)
)
return workspace.FetchBlob(dequeue_blob)
class ParallelWorkersTest(unittest.TestCase):
def testParallelWorkers(self):
workspace.ResetWorkspace()
queue = create_queue()
dummy_worker = create_worker(queue, lambda worker_id: str(worker_id))
worker_coordinator = parallel_workers.init_workers(dummy_worker)
worker_coordinator.start()
for _ in range(10):
value = dequeue_value(queue)
self.assertTrue(
value in [b'0', b'1'], 'Got unexpected value ' + str(value)
)
self.assertTrue(worker_coordinator.stop())
def testParallelWorkersInitFun(self):
workspace.ResetWorkspace()
queue = create_queue()
dummy_worker = create_worker(
queue, lambda worker_id: workspace.FetchBlob('data')
)
workspace.FeedBlob('data', 'not initialized')
def init_fun(worker_coordinator, global_coordinator):
workspace.FeedBlob('data', 'initialized')
worker_coordinator = parallel_workers.init_workers(
dummy_worker, init_fun=init_fun
)
worker_coordinator.start()
for _ in range(10):
value = dequeue_value(queue)
self.assertEqual(
value, b'initialized', 'Got unexpected value ' + str(value)
)
# A best effort attempt at a clean shutdown
worker_coordinator.stop()
def testParallelWorkersShutdownFun(self):
workspace.ResetWorkspace()
queue = create_queue()
dummy_worker = create_worker(queue, lambda worker_id: str(worker_id))
workspace.FeedBlob('data', 'not shutdown')
def shutdown_fun():
workspace.FeedBlob('data', 'shutdown')
worker_coordinator = parallel_workers.init_workers(
dummy_worker, shutdown_fun=shutdown_fun
)
worker_coordinator.start()
self.assertTrue(worker_coordinator.stop())
data = workspace.FetchBlob('data')
self.assertEqual(data, b'shutdown', 'Got unexpected value ' + str(data))
| [
"[email protected]"
] | |
1afac3048abf80eaca5946d636d71eb80156108e | e3cf1af904b97af7fd35b7e972e5e6a7e13af8d3 | /TCPServer/neuroScanToolbox.py | 0789676aa01146eccebac44613ee796b150b8df2 | [
"MIT"
] | permissive | listenzcc/BCIMiddleware | 6da7449562d9128a40708b5d9a8e6f0f94e5c760 | 80f74731b4df7f6da84c5df0c67e0ca4e6af7102 | refs/heads/main | 2023-05-05T04:12:09.199424 | 2021-05-31T03:17:14 | 2021-05-31T03:17:14 | 352,538,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,978 | py | import time
import struct
import socket
import threading
import numpy as np
from . import logger
class NeuroScanDeviceClient(object):
'''NeuroScan Device Client.
The communication is in TCP socket,
the process is:
1.1. Connect to the device, @connect;
1.2. Send start scan request to device;
2. Start acquisition, @start_acq;
3. The device will send data every 0.04 seconds;
4. Stop acquisition, @stop_acq;
5.1. Send stop scan request to device;
5.2. Disconnect from the device, @disconnect.
'''
def __init__(self, ip_address, port, sample_rate, n_channels):
'''Initialize with Basic Parameters,
and connect to the device.
Args:
- @ip_address: The IP address of the device;
- @port: The port of the device;
- @sample_rate: The sample rate;
- @n_channels: The number of channels.
'''
self.ip_address = ip_address
self.port = port
self.sample_rate = sample_rate
self.n_channels = n_channels
self.compute_bytes_per_package()
logger.info(f'EEG Device client initialized.')
self.connect()
def compute_bytes_per_package(self, time_per_packet=0.04):
'''Compute the length of bytes in every data packet
Args:
- @time_per_packet: The interval between two data packets from the device.
Generates:
- @packet_time_point: The time points in each packets;
- @bytes_per_packet: The bytes length in each packet.
'''
packet_time_point = int(np.round(self.sample_rate * time_per_packet))
bytes_per_packet = (self.n_channels + 1) * packet_time_point * 4
self.packet_time_point = packet_time_point
self.bytes_per_packet = bytes_per_packet
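        # Worked example (hypothetical values, not taken from a real device):
        # at sample_rate = 1000 Hz with n_channels = 64,
        # packet_time_point = round(1000 * 0.04) = 40 samples per packet and
        # bytes_per_packet = (64 + 1) * 40 * 4 = 10400 bytes.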
def _unpack_data_fmt(self):
'''Generate built-in format for unpacking the data
Outs:
- The format.
'''
return '<' + str((self.n_channels + 1) * self.packet_time_point) + 'i'
def _unpack_header(self, header_packet):
'''The method of unpacking header.
Args:
- @header_packet: The header packet to be unpacked.
Outs:
- The contents in the header.
'''
chan_name = struct.unpack('>4s', header_packet[:4])
w_code = struct.unpack('>H', header_packet[4:6])
w_request = struct.unpack('>H', header_packet[6:8])
packet_size = struct.unpack('>I', header_packet[8:])
return (chan_name[0], w_code[0], w_request[0], packet_size[0])
def _unpack_data(self, data_packet):
'''The method of unpacking data.
Args:
- @data_packet: The data packet to be unpacked.
Outs:
- The data in matrix, the shape is (n_channels x time_points).
'''
data_trans = np.asarray(struct.unpack(self._unpack_data_fmt(),
data_packet)).reshape((-1, self.n_channels + 1)).T
return data_trans
def connect(self):
'''Connect to the device,
and start scanning.
'''
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SEND_BUF_SIZE = self.bytes_per_packet
RECV_BUF_SIZE = self.bytes_per_packet * 9
self.client.connect((self.ip_address, self.port))
self.client.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
self.client.setsockopt(
socket.SOL_SOCKET, socket.SO_SNDBUF, SEND_BUF_SIZE)
self.client.setsockopt(
socket.SOL_SOCKET, socket.SO_RCVBUF, RECV_BUF_SIZE)
logger.info('Established the connection to EEG Device.')
        self.send(struct.pack('12B', 67, 84, 82, 76,
                              0, 2, 0, 1, 0, 0, 0, 0))  # start fetching data
header_packet = self.receive_data(24)
logger.debug(f'Received for ACQ request: {header_packet}')
def send(self, msg):
'''Send message to the device.
Args:
- @msg: The message to be sent, it should be of bytes.
'''
self.client.send(msg)
logger.debug(f'Sent {msg}')
def start_acq(self):
'''Send start acquisition message to the device.
A thread will be started to collecting data from the device.
Vars:
- @data: Where the data will be stored in;
- @data_length: The accumulated length of the data;
- @collecting: The flag of collecting.
'''
        self.send(struct.pack('12B', 67, 84, 82, 76,
                              0, 3, 0, 3, 0, 0, 0, 0))  # start acquisition
self.data = []
self.data_length = 0
self.collecting = True
t = threading.Thread(target=self.collect)
t.setDaemon(True)
t.start()
def collect(self):
'''The collecting method used by start_acq.
- It will collect data until [collecting] is set to False;
- It will report data length every 1000 units;
        - It may complain about the connection being aborted on close; this is fine.
'''
logger.info('Collection Start.')
while self.collecting:
try:
d = self.get_data()
self.data.append(d)
self.data_length += d.shape[1]
if self.data_length % 1000 == 0:
logger.debug(
f'Accumulated data length: {self.data_length}')
except ConnectionAbortedError:
logger.warning(
'Connection to the device is closed. This can be normal if collecting is done.')
break
logger.info('Collection Done.')
def get_data(self):
        '''Get the data from the latest packet.
        The packet is in two parts:
        - header: its last field gives the length of the data body;
        - data: The data body;
        - The length of the data body should equal the [bytes_per_packet] computed beforehand.
Outs:
- new_data_temp: The latest data, the shape is (n_channels x time_points(0.04 seconds)).
'''
tmp_header = self.receive_data(12)
details_header = self._unpack_header(tmp_header)
        if details_header[-1] != self.bytes_per_packet:
            logger.warning(
                f'Received data has {details_header[-1]} bytes, but {self.bytes_per_packet} bytes were expected. The EEG channels setting may be incorrect.')
bytes_data = self.receive_data(self.bytes_per_packet)
new_data_trans = self._unpack_data(bytes_data)
        new_data_temp = np.empty(new_data_trans.shape, dtype=float)
        new_data_temp[:-1, :] = new_data_trans[:-1, :] * 0.0298  # unit: uV
new_data_temp[-1, :] = np.zeros(new_data_trans.shape[1])
return new_data_temp
def get_all(self):
'''Get the accumulated data as a matrix, the shape is (n_channels x time_points(accumulated)).
Outs:
- The accumulated data.
'''
if not hasattr(self, 'data'):
return None
if self.data == []:
return np.zeros((self.n_channels, 0))
return np.concatenate(self.data, axis=1)
def receive_data(self, n_bytes):
'''The built-in method of receiving [n_bytes] length bytes from the device,
it will read the buffer until it reached to the [n_bytes] length.
Args:
- @n_bytes: The length of the bytes to be fetched.
Outs:
- The [n_bytes] length bytes.
'''
b_data = b''
flag_stop_recv = False
b_count = 0
while not flag_stop_recv:
tmp_bytes = self.client.recv(n_bytes - b_count)
if b_count == n_bytes or not tmp_bytes:
flag_stop_recv = True
b_count += len(tmp_bytes)
b_data += tmp_bytes
return b_data
def stop_acq(self):
'''Send stopping acquisition message to the device,
and the collecting threading will be stopped accordingly,
it will also clear the existing contents in the buffer.
'''
self.collecting = False
time.sleep(0.1)
        self.send(struct.pack('12B', 67, 84, 82, 76,
                              0, 3, 0, 4, 0, 0, 0, 0))  # stop acquisition
self.get_data()
def disconnect(self):
'''Disconnect from the device.
'''
        self.send(struct.pack('12B', 67, 84, 82, 76,
                              0, 2, 0, 2, 0, 0, 0, 0))  # stop fetching data
        self.send(struct.pack('12B', 67, 84, 82, 76,
                              0, 1, 0, 2, 0, 0, 0, 0))  # close the connection
self.client.close()
logger.info(f'Closed Connection to Device.')
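

# Usage sketch (illustration only, not part of the original module). It follows
# the workflow listed in the class docstring: connect -> start_acq -> wait ->
# stop_acq -> get_all -> disconnect. The address, port, sample rate and channel
# count below are placeholder assumptions; substitute your own device's settings.
def _example_acquisition(ip_address='192.168.1.100', port=4000,
                         sample_rate=1000, n_channels=64, seconds=2.0):
    client = NeuroScanDeviceClient(ip_address, port, sample_rate, n_channels)
    client.start_acq()          # starts the background collecting thread
    time.sleep(seconds)         # let data accumulate
    client.stop_acq()           # stop acquisition and flush the pending packet
    data = client.get_all()     # matrix of shape (n_channels + 1) x accumulated time points
    client.disconnect()
    return data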
| [
"[email protected]"
] | |
521b2928efcb138c4ef38d26f04b6f9b956f728e | 88863cb16f35cd479d43f2e7852d20064daa0c89 | /HelpingSantasHelpers/download/eval_code/hours.py | 92c418e0a8a4828e94b1d7876b0ac4442d3fda3b | [] | no_license | chrishefele/kaggle-sample-code | 842c3cd766003f3b8257fddc4d61b919e87526c4 | 1c04e859c7376f8757b011ed5a9a1f455bd598b9 | refs/heads/master | 2020-12-29T12:18:09.957285 | 2020-12-22T20:16:35 | 2020-12-22T20:16:35 | 238,604,678 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,379 | py |
import datetime
class Hours:
""" Hours class takes care of time accounting. Note that convention is
9:00-9:05 is work taking place on the 00, 01, 02, 03, 04 minutes, so 5 minutes of work.
Elf is available for next work at 9:05
Class members day_start, day_end are in minutes relative to the day
"""
def __init__(self):
self.hours_per_day = 10 # 10 hour day: 9 - 19
self.day_start = 9 * 60
self.day_end = (9 + self.hours_per_day) * 60
self.reference_start_time = datetime.datetime(2014, 1, 1, 0, 0)
self.minutes_in_24h = 24 * 60
@staticmethod
def convert_to_minute(arrival):
""" Converts the arrival time string to minutes since the reference start time,
Jan 1, 2014 at 00:00 (aka, midnight Dec 31, 2013)
:param arrival: string in format '2014 12 17 7 03' for Dec 17, 2014 at 7:03 am
        :return: integer (arrival time, in minutes since the reference start time)
"""
time = arrival.split(' ')
dd = datetime.datetime(int(time[0]), int(time[1]), int(time[2]), int(time[3]), int(time[4]))
age = dd - datetime.datetime(2014, 1, 1, 0, 0)
return int(age.total_seconds() / 60)
def is_sanctioned_time(self, minute):
""" Return boolean True or False if a given time (in minutes) is a sanctioned working day minute. """
return ((minute - self.day_start) % self.minutes_in_24h) < (self.hours_per_day * 60)
def get_sanctioned_breakdown(self, start_minute, duration):
""" Whole days (24-hr time periods) contribute fixed quantities of sanctioned and unsanctioned time. After
accounting for the whole days in the duration, the remainder minutes are tabulated as un/sanctioned.
:param start_minute:
:param duration:
:return:
"""
full_days = duration / (self.minutes_in_24h)
sanctioned = full_days * self.hours_per_day * 60
unsanctioned = full_days * (24 - self.hours_per_day) * 60
remainder_start = start_minute + full_days * self.minutes_in_24h
for minute in xrange(remainder_start, start_minute+duration):
if self.is_sanctioned_time(minute):
sanctioned += 1
else:
unsanctioned += 1
return sanctioned, unsanctioned
def next_sanctioned_minute(self, minute):
""" Given a minute, finds the next sanctioned minute.
:param minute: integer representing a minute since reference time
:return: next sanctioned minute
"""
# next minute is a sanctioned minute
if self.is_sanctioned_time(minute) and self.is_sanctioned_time(minute+1):
return minute + 1
num_days = minute / self.minutes_in_24h
return self.day_start + (num_days + 1) * self.minutes_in_24h
def apply_resting_period(self, start, num_unsanctioned):
""" Enforces the rest period and returns the minute when the elf is next available for work.
Rest period is only applied to sanctioned work hours.
:param start: minute the REST period starts
:param num_unsanctioned: always > 0 number of unsanctioned minutes that need resting minutes
:return: next available minute after rest period has been applied
"""
num_days_since_jan1 = start / self.minutes_in_24h
rest_time = num_unsanctioned
rest_time_in_working_days = rest_time / (60 * self.hours_per_day)
rest_time_remaining_minutes = rest_time % (60 * self.hours_per_day)
# rest time is only applied to sanctioned work hours. If local_start is at an unsanctioned time,
# need to set it to be the next start of day
local_start = start % self.minutes_in_24h # minute of the day (relative to a current day) the work starts
if local_start < self.day_start:
local_start = self.day_start
elif local_start > self.day_end:
num_days_since_jan1 += 1
local_start = self.day_start
if local_start + rest_time_remaining_minutes > self.day_end:
rest_time_in_working_days += 1
rest_time_remaining_minutes -= (self.day_end - local_start)
local_start = self.day_start
total_days = num_days_since_jan1 + rest_time_in_working_days
return total_days * self.minutes_in_24h + local_start + rest_time_remaining_minutes
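

# Usage sketch (illustration only, not part of the original evaluation code).
# It exercises the helpers on a made-up arrival time: convert the time string
# to minutes since the reference time, split a 12-hour job into sanctioned and
# unsanctioned minutes, then apply the resting period for the unsanctioned part.
def _example_hours():
    hrs = Hours()
    start = Hours.convert_to_minute('2014 1 1 9 30')  # Jan 1, 2014 at 9:30
    sanctioned, unsanctioned = hrs.get_sanctioned_breakdown(start, 12 * 60)
    next_available = hrs.apply_resting_period(start + 12 * 60, unsanctioned)
    return sanctioned, unsanctioned, next_available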
| [
"[email protected]"
] | |
c037677bd40802d525c9b0cc48e7980d0d9370cd | cb73fe89463892c8c147c6995e220f5b1635fabb | /AtCoder Beginner Contest 157/q4.py | 2f3997be6ae6197dd3f841423ebfccc58d44d5dc | [] | no_license | Haraboo0814/AtCoder | 244f6fd17e8f6beee2d46fbfaea6a8e798878920 | 7ad794fd85e8d22d4e35087ed38f453da3c573ca | refs/heads/master | 2023-06-15T20:08:37.348078 | 2021-07-17T09:31:30 | 2021-07-17T09:31:30 | 254,162,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | class UnionFind():
def __init__(self, n):
self.n = n
self.parents = [-1] * n
def find(self, x):
if self.parents[x] < 0:
return x
self.parents[x] = self.find(self.parents[x])
return self.parents[x]
def unite(self, x, y):
x = self.find(x)
y = self.find(y)
if x == y:
return
if self.parents[x] > self.parents[y]:
x, y = y, x
self.parents[x] += self.parents[y]
self.parents[y] = x
def same(self, x, y):
return self.find(x) == self.find(y)
def size(self, x):
return -self.parents[self.find(x)]
N, M, K = map(int, input().split())
uf = UnionFind(N)
friends = [[] for _ in range(N)]    # friendship relations
blocks = [[] for _ in range(N)]     # block relations
for i in range(M):
a, b = map(int, input().split())
    # add the friendship in both directions
friends[a - 1].append(b - 1)
friends[b - 1].append(a - 1)
    # merge the two components
uf.unite(a - 1, b - 1)
for i in range(K):
c, d = map(int, input().split())
if uf.same(c - 1, d - 1):
# 同じグループ内の場合相互ブロック関係の追加
blocks[c - 1].append(d - 1)
blocks[d - 1].append(c - 1)
ans = []
for i in range(N):
    # component size - self - blocked users - direct friends
ans.append(uf.size(i) - 1 - len(blocks[i]) - len(friends[i]))
print(*ans)
| [
"[email protected]"
] | |
d637b21e1ece6c029c4c29138a0c0a4fab9eb9c0 | e0731ac7bd6a9fcb386d9c5d4181c9d549ab1d02 | /desafio81.py | b0763c623e58fc03a19757c78b94ebd4003ba32e | [] | no_license | lportinari/Desafios-Python-Curso-em-Video | 3ab98b87a2178448b3e53031b86522558c31c099 | cd7662ddfe371e48e5aabc6e86e23dc6337405fb | refs/heads/master | 2020-04-29T11:09:25.689901 | 2019-06-23T23:58:06 | 2019-06-23T23:58:06 | 176,087,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | """Crie um programa que vai ler vários números e colocar em uma lista. Depois disso, mostre:
A) Quantos números foram digitados.
B) A lista de valores, ordenada de forma decrescente.
C) Se o valor 5 foi digitado e está ou não na lista."""
lista = []
while True:
lista.append(int(input('Digite um valor: ')))
resp = str(input('Quer continuar? [S/N]: ')).strip()[0]
if resp in 'Nn':
break
print('-=' * 30)
lista.sort(reverse=True)
print(f'Você digitou {len(lista)} elementos.')
print(f'Os valores em ordem decrescente são {lista}')
if 5 in lista:
print('O valor 5 faz parte da lista.')
else:
print('O valor 5 não foi encontrado na lista.') | [
"[email protected]"
] | |
b60d9811abfaa3ba27b15660b09b51606387d2df | f907f8ce3b8c3b203e5bb9d3be012bea51efd85f | /cakes_and_donuts.py | 79ab95d0266b3498fe901472d27b5c41b4c5c0eb | [] | no_license | KohsukeKubota/Atcoder-practice | 3b4b986395551443f957d1818d6f9a0bf6132e90 | 52554a2649445c2760fc3982e722854fed5b8ab1 | refs/heads/master | 2020-08-26T15:17:29.344402 | 2019-10-26T11:14:24 | 2019-10-26T11:14:24 | 217,052,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | N = int(input())
cnum = 25
dnum = 14
res = 0
for i in range(cnum):
for j in range(dnum):
amm = 4 * i + 7 * j
if amm == N:
res += 1
if res > 0:
print('Yes')
else:
print('No')
| [
"[email protected]"
] | |
0ee14c848c8249d51d714cc4ece8833ee0d8baa6 | a7ded5d3d19a98e61a44189cffe3703f7938e0db | /xero_python/payrolluk/models/employee_statutory_sick_leave.py | f19c0b3a4659dd3bb73d7a6d4b8dcc3c663ae293 | [
"MIT"
] | permissive | liseekeralbert/xero-python | dfd1076344f763d74f81f701e32600cf88bcc7b2 | d27ab1894ecd84d2a9af0ca91583593756b21ab3 | refs/heads/master | 2022-12-16T07:41:14.331308 | 2020-09-18T17:12:35 | 2020-09-18T17:12:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,845 | py | # coding: utf-8
"""
Xero Payroll UK
This is the Xero Payroll API for orgs in the UK region. # noqa: E501
OpenAPI spec version: 2.3.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class EmployeeStatutorySickLeave(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"statutory_leave_id": "str",
"employee_id": "str",
"leave_type_id": "str",
"start_date": "date",
"end_date": "date",
"type": "str",
"status": "str",
"work_pattern": "list[str]",
"is_pregnancy_related": "bool",
"sufficient_notice": "bool",
"is_entitled": "bool",
"entitlement_weeks_requested": "float",
"entitlement_weeks_qualified": "float",
"entitlement_weeks_remaining": "float",
"overlaps_with_other_leave": "bool",
"entitlement_failure_reasons": "list[str]",
}
attribute_map = {
"statutory_leave_id": "statutoryLeaveID",
"employee_id": "employeeID",
"leave_type_id": "leaveTypeID",
"start_date": "startDate",
"end_date": "endDate",
"type": "type",
"status": "status",
"work_pattern": "workPattern",
"is_pregnancy_related": "isPregnancyRelated",
"sufficient_notice": "sufficientNotice",
"is_entitled": "isEntitled",
"entitlement_weeks_requested": "entitlementWeeksRequested",
"entitlement_weeks_qualified": "entitlementWeeksQualified",
"entitlement_weeks_remaining": "entitlementWeeksRemaining",
"overlaps_with_other_leave": "overlapsWithOtherLeave",
"entitlement_failure_reasons": "entitlementFailureReasons",
}
def __init__(
self,
statutory_leave_id=None,
employee_id=None,
leave_type_id=None,
start_date=None,
end_date=None,
type=None,
status=None,
work_pattern=None,
is_pregnancy_related=None,
sufficient_notice=None,
is_entitled=None,
entitlement_weeks_requested=None,
entitlement_weeks_qualified=None,
entitlement_weeks_remaining=None,
overlaps_with_other_leave=None,
entitlement_failure_reasons=None,
): # noqa: E501
"""EmployeeStatutorySickLeave - a model defined in OpenAPI""" # noqa: E501
self._statutory_leave_id = None
self._employee_id = None
self._leave_type_id = None
self._start_date = None
self._end_date = None
self._type = None
self._status = None
self._work_pattern = None
self._is_pregnancy_related = None
self._sufficient_notice = None
self._is_entitled = None
self._entitlement_weeks_requested = None
self._entitlement_weeks_qualified = None
self._entitlement_weeks_remaining = None
self._overlaps_with_other_leave = None
self._entitlement_failure_reasons = None
self.discriminator = None
if statutory_leave_id is not None:
self.statutory_leave_id = statutory_leave_id
self.employee_id = employee_id
self.leave_type_id = leave_type_id
self.start_date = start_date
self.end_date = end_date
if type is not None:
self.type = type
if status is not None:
self.status = status
self.work_pattern = work_pattern
self.is_pregnancy_related = is_pregnancy_related
self.sufficient_notice = sufficient_notice
if is_entitled is not None:
self.is_entitled = is_entitled
if entitlement_weeks_requested is not None:
self.entitlement_weeks_requested = entitlement_weeks_requested
if entitlement_weeks_qualified is not None:
self.entitlement_weeks_qualified = entitlement_weeks_qualified
if entitlement_weeks_remaining is not None:
self.entitlement_weeks_remaining = entitlement_weeks_remaining
if overlaps_with_other_leave is not None:
self.overlaps_with_other_leave = overlaps_with_other_leave
if entitlement_failure_reasons is not None:
self.entitlement_failure_reasons = entitlement_failure_reasons
@property
def statutory_leave_id(self):
"""Gets the statutory_leave_id of this EmployeeStatutorySickLeave. # noqa: E501
The unique identifier (guid) of a statutory leave # noqa: E501
:return: The statutory_leave_id of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: str
"""
return self._statutory_leave_id
@statutory_leave_id.setter
def statutory_leave_id(self, statutory_leave_id):
"""Sets the statutory_leave_id of this EmployeeStatutorySickLeave.
The unique identifier (guid) of a statutory leave # noqa: E501
:param statutory_leave_id: The statutory_leave_id of this EmployeeStatutorySickLeave. # noqa: E501
:type: str
"""
self._statutory_leave_id = statutory_leave_id
@property
def employee_id(self):
"""Gets the employee_id of this EmployeeStatutorySickLeave. # noqa: E501
The unique identifier (guid) of the employee # noqa: E501
:return: The employee_id of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: str
"""
return self._employee_id
@employee_id.setter
def employee_id(self, employee_id):
"""Sets the employee_id of this EmployeeStatutorySickLeave.
The unique identifier (guid) of the employee # noqa: E501
:param employee_id: The employee_id of this EmployeeStatutorySickLeave. # noqa: E501
:type: str
"""
if employee_id is None:
raise ValueError(
"Invalid value for `employee_id`, must not be `None`"
) # noqa: E501
self._employee_id = employee_id
@property
def leave_type_id(self):
"""Gets the leave_type_id of this EmployeeStatutorySickLeave. # noqa: E501
The unique identifier (guid) of the \"Statutory Sick Leave (non-pensionable)\" pay item # noqa: E501
:return: The leave_type_id of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: str
"""
return self._leave_type_id
@leave_type_id.setter
def leave_type_id(self, leave_type_id):
"""Sets the leave_type_id of this EmployeeStatutorySickLeave.
The unique identifier (guid) of the \"Statutory Sick Leave (non-pensionable)\" pay item # noqa: E501
:param leave_type_id: The leave_type_id of this EmployeeStatutorySickLeave. # noqa: E501
:type: str
"""
if leave_type_id is None:
raise ValueError(
"Invalid value for `leave_type_id`, must not be `None`"
) # noqa: E501
self._leave_type_id = leave_type_id
@property
def start_date(self):
"""Gets the start_date of this EmployeeStatutorySickLeave. # noqa: E501
The date when the leave starts # noqa: E501
:return: The start_date of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: date
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this EmployeeStatutorySickLeave.
The date when the leave starts # noqa: E501
:param start_date: The start_date of this EmployeeStatutorySickLeave. # noqa: E501
:type: date
"""
if start_date is None:
raise ValueError(
"Invalid value for `start_date`, must not be `None`"
) # noqa: E501
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this EmployeeStatutorySickLeave. # noqa: E501
The date when the leave ends # noqa: E501
:return: The end_date of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: date
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this EmployeeStatutorySickLeave.
The date when the leave ends # noqa: E501
:param end_date: The end_date of this EmployeeStatutorySickLeave. # noqa: E501
:type: date
"""
if end_date is None:
raise ValueError(
"Invalid value for `end_date`, must not be `None`"
) # noqa: E501
self._end_date = end_date
@property
def type(self):
"""Gets the type of this EmployeeStatutorySickLeave. # noqa: E501
the type of statutory leave # noqa: E501
:return: The type of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this EmployeeStatutorySickLeave.
the type of statutory leave # noqa: E501
:param type: The type of this EmployeeStatutorySickLeave. # noqa: E501
:type: str
"""
self._type = type
@property
def status(self):
"""Gets the status of this EmployeeStatutorySickLeave. # noqa: E501
        the status of the statutory leave  # noqa: E501
:return: The status of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this EmployeeStatutorySickLeave.
        the status of the statutory leave  # noqa: E501
:param status: The status of this EmployeeStatutorySickLeave. # noqa: E501
:type: str
"""
self._status = status
@property
def work_pattern(self):
"""Gets the work_pattern of this EmployeeStatutorySickLeave. # noqa: E501
The days of the work week the employee is scheduled to work at the time the leave is taken # noqa: E501
:return: The work_pattern of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: list[str]
"""
return self._work_pattern
@work_pattern.setter
def work_pattern(self, work_pattern):
"""Sets the work_pattern of this EmployeeStatutorySickLeave.
The days of the work week the employee is scheduled to work at the time the leave is taken # noqa: E501
:param work_pattern: The work_pattern of this EmployeeStatutorySickLeave. # noqa: E501
:type: list[str]
"""
if work_pattern is None:
raise ValueError(
"Invalid value for `work_pattern`, must not be `None`"
) # noqa: E501
self._work_pattern = work_pattern
@property
def is_pregnancy_related(self):
"""Gets the is_pregnancy_related of this EmployeeStatutorySickLeave. # noqa: E501
Whether the sick leave was pregnancy related # noqa: E501
:return: The is_pregnancy_related of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: bool
"""
return self._is_pregnancy_related
@is_pregnancy_related.setter
def is_pregnancy_related(self, is_pregnancy_related):
"""Sets the is_pregnancy_related of this EmployeeStatutorySickLeave.
Whether the sick leave was pregnancy related # noqa: E501
:param is_pregnancy_related: The is_pregnancy_related of this EmployeeStatutorySickLeave. # noqa: E501
:type: bool
"""
if is_pregnancy_related is None:
raise ValueError(
"Invalid value for `is_pregnancy_related`, must not be `None`"
) # noqa: E501
self._is_pregnancy_related = is_pregnancy_related
@property
def sufficient_notice(self):
"""Gets the sufficient_notice of this EmployeeStatutorySickLeave. # noqa: E501
Whether the employee provided sufficent notice and documentation as required by the employer supporting the sick leave request # noqa: E501
:return: The sufficient_notice of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: bool
"""
return self._sufficient_notice
@sufficient_notice.setter
def sufficient_notice(self, sufficient_notice):
"""Sets the sufficient_notice of this EmployeeStatutorySickLeave.
Whether the employee provided sufficent notice and documentation as required by the employer supporting the sick leave request # noqa: E501
:param sufficient_notice: The sufficient_notice of this EmployeeStatutorySickLeave. # noqa: E501
:type: bool
"""
if sufficient_notice is None:
raise ValueError(
"Invalid value for `sufficient_notice`, must not be `None`"
) # noqa: E501
self._sufficient_notice = sufficient_notice
@property
def is_entitled(self):
"""Gets the is_entitled of this EmployeeStatutorySickLeave. # noqa: E501
Whether the leave was entitled to receive payment # noqa: E501
:return: The is_entitled of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: bool
"""
return self._is_entitled
@is_entitled.setter
def is_entitled(self, is_entitled):
"""Sets the is_entitled of this EmployeeStatutorySickLeave.
Whether the leave was entitled to receive payment # noqa: E501
:param is_entitled: The is_entitled of this EmployeeStatutorySickLeave. # noqa: E501
:type: bool
"""
self._is_entitled = is_entitled
@property
def entitlement_weeks_requested(self):
"""Gets the entitlement_weeks_requested of this EmployeeStatutorySickLeave. # noqa: E501
The amount of requested time (in weeks) # noqa: E501
:return: The entitlement_weeks_requested of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: float
"""
return self._entitlement_weeks_requested
@entitlement_weeks_requested.setter
def entitlement_weeks_requested(self, entitlement_weeks_requested):
"""Sets the entitlement_weeks_requested of this EmployeeStatutorySickLeave.
The amount of requested time (in weeks) # noqa: E501
:param entitlement_weeks_requested: The entitlement_weeks_requested of this EmployeeStatutorySickLeave. # noqa: E501
:type: float
"""
self._entitlement_weeks_requested = entitlement_weeks_requested
@property
def entitlement_weeks_qualified(self):
"""Gets the entitlement_weeks_qualified of this EmployeeStatutorySickLeave. # noqa: E501
The amount of statutory sick leave time off (in weeks) that is available to take at the time the leave was requested # noqa: E501
:return: The entitlement_weeks_qualified of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: float
"""
return self._entitlement_weeks_qualified
@entitlement_weeks_qualified.setter
def entitlement_weeks_qualified(self, entitlement_weeks_qualified):
"""Sets the entitlement_weeks_qualified of this EmployeeStatutorySickLeave.
The amount of statutory sick leave time off (in weeks) that is available to take at the time the leave was requested # noqa: E501
:param entitlement_weeks_qualified: The entitlement_weeks_qualified of this EmployeeStatutorySickLeave. # noqa: E501
:type: float
"""
self._entitlement_weeks_qualified = entitlement_weeks_qualified
@property
def entitlement_weeks_remaining(self):
"""Gets the entitlement_weeks_remaining of this EmployeeStatutorySickLeave. # noqa: E501
A calculated amount of time (in weeks) that remains for the statutory sick leave period # noqa: E501
:return: The entitlement_weeks_remaining of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: float
"""
return self._entitlement_weeks_remaining
@entitlement_weeks_remaining.setter
def entitlement_weeks_remaining(self, entitlement_weeks_remaining):
"""Sets the entitlement_weeks_remaining of this EmployeeStatutorySickLeave.
A calculated amount of time (in weeks) that remains for the statutory sick leave period # noqa: E501
:param entitlement_weeks_remaining: The entitlement_weeks_remaining of this EmployeeStatutorySickLeave. # noqa: E501
:type: float
"""
self._entitlement_weeks_remaining = entitlement_weeks_remaining
@property
def overlaps_with_other_leave(self):
"""Gets the overlaps_with_other_leave of this EmployeeStatutorySickLeave. # noqa: E501
Whether another leave (Paternity, Shared Parental specifically) occurs during the requested leave's period. While this is allowed it could affect payment amounts # noqa: E501
:return: The overlaps_with_other_leave of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: bool
"""
return self._overlaps_with_other_leave
@overlaps_with_other_leave.setter
def overlaps_with_other_leave(self, overlaps_with_other_leave):
"""Sets the overlaps_with_other_leave of this EmployeeStatutorySickLeave.
Whether another leave (Paternity, Shared Parental specifically) occurs during the requested leave's period. While this is allowed it could affect payment amounts # noqa: E501
:param overlaps_with_other_leave: The overlaps_with_other_leave of this EmployeeStatutorySickLeave. # noqa: E501
:type: bool
"""
self._overlaps_with_other_leave = overlaps_with_other_leave
@property
def entitlement_failure_reasons(self):
"""Gets the entitlement_failure_reasons of this EmployeeStatutorySickLeave. # noqa: E501
If the leave requested was considered \"not entitled\", the reasons why are listed here. # noqa: E501
:return: The entitlement_failure_reasons of this EmployeeStatutorySickLeave. # noqa: E501
:rtype: list[str]
"""
return self._entitlement_failure_reasons
@entitlement_failure_reasons.setter
def entitlement_failure_reasons(self, entitlement_failure_reasons):
"""Sets the entitlement_failure_reasons of this EmployeeStatutorySickLeave.
If the leave requested was considered \"not entitled\", the reasons why are listed here. # noqa: E501
:param entitlement_failure_reasons: The entitlement_failure_reasons of this EmployeeStatutorySickLeave. # noqa: E501
:type: list[str]
"""
allowed_values = [
"UnableToCalculateAwe",
"AweLowerThanLel",
"NotQualifiedInPreviousPiw",
"ExceededMaximumEntitlementWeeksOfSsp",
"ExceededMaximumDurationOfPiw",
"SufficientNoticeNotGiven",
"None",
] # noqa: E501
if not set(entitlement_failure_reasons).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `entitlement_failure_reasons` [{0}], must be a subset of [{1}]".format( # noqa: E501
", ".join(
map(str, set(entitlement_failure_reasons) - set(allowed_values))
), # noqa: E501
", ".join(map(str, allowed_values)),
)
)
self._entitlement_failure_reasons = entitlement_failure_reasons
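

# Usage sketch (illustration only; the identifiers, dates and day names below
# are invented example values). It constructs the model with the fields whose
# setters reject None (employee_id, leave_type_id, start_date, end_date,
# work_pattern, is_pregnancy_related, sufficient_notice); all other fields are optional.
def _example_employee_statutory_sick_leave():
    import datetime
    return EmployeeStatutorySickLeave(
        employee_id='00000000-0000-0000-0000-000000000001',
        leave_type_id='00000000-0000-0000-0000-000000000002',
        start_date=datetime.date(2020, 4, 1),
        end_date=datetime.date(2020, 4, 10),
        work_pattern=['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'],
        is_pregnancy_related=False,
        sufficient_notice=True,
    )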
| [
"[email protected]"
] | |
c882d325f55029694257bdc9f8142cef5ca05acf | 0c0168a4676bce7453836a7509e7133044aa8975 | /byceps/services/page/dbmodels.py | c6d87ca51e5d385a04bb0f4730c934f43332f5b6 | [
"BSD-3-Clause"
] | permissive | byceps/byceps | 0aad3c4d974f76c6f8c3674d5539a80c9107b97a | eaee2b7fdc08c76c16ddf7f436110e0b5f1812e5 | refs/heads/main | 2023-09-01T04:03:13.365687 | 2023-09-01T03:28:18 | 2023-09-01T03:28:18 | 40,150,239 | 44 | 23 | BSD-3-Clause | 2023-05-16T18:41:32 | 2015-08-03T22:05:23 | Python | UTF-8 | Python | false | false | 3,850 | py | """
byceps.services.page.dbmodels
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Pages of database-stored content. Can contain HTML and template engine
syntax.
:Copyright: 2014-2023 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from datetime import datetime
from sqlalchemy.ext.associationproxy import association_proxy
from byceps.database import db, generate_uuid7
from byceps.services.language.dbmodels import DbLanguage
from byceps.services.site.models import SiteID
from byceps.services.site_navigation.dbmodels import DbNavMenu
from byceps.services.user.dbmodels.user import DbUser
from byceps.typing import UserID
class DbPage(db.Model):
"""A content page.
Any page is expected to have at least one version (the initial one).
"""
__tablename__ = 'pages'
__table_args__ = (
db.UniqueConstraint('site_id', 'name', 'language_code'),
db.UniqueConstraint('site_id', 'language_code', 'url_path'),
)
id = db.Column(db.Uuid, default=generate_uuid7, primary_key=True)
site_id = db.Column(
db.UnicodeText, db.ForeignKey('sites.id'), index=True, nullable=False
)
name = db.Column(db.UnicodeText, index=True, nullable=False)
language_code = db.Column(
db.UnicodeText,
db.ForeignKey('languages.code'),
index=True,
nullable=False,
)
language = db.relationship(DbLanguage)
url_path = db.Column(db.UnicodeText, index=True, nullable=False)
published = db.Column(db.Boolean, nullable=False)
nav_menu_id = db.Column(
db.Uuid, db.ForeignKey('site_nav_menus.id'), nullable=True
)
nav_menu = db.relationship(DbNavMenu)
current_version = association_proxy(
'current_version_association', 'version'
)
def __init__(
self, site_id: SiteID, name: str, language_code: str, url_path: str
) -> None:
self.site_id = site_id
self.name = name
self.language_code = language_code
self.url_path = url_path
self.published = False
class DbPageVersion(db.Model):
"""A snapshot of a page at a certain time."""
__tablename__ = 'page_versions'
id = db.Column(db.Uuid, default=generate_uuid7, primary_key=True)
page_id = db.Column(
db.Uuid, db.ForeignKey('pages.id'), index=True, nullable=False
)
page = db.relationship(DbPage)
created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
creator_id = db.Column(db.Uuid, db.ForeignKey('users.id'), nullable=False)
creator = db.relationship(DbUser)
title = db.Column(db.UnicodeText, nullable=False)
head = db.Column(db.UnicodeText, nullable=True)
body = db.Column(db.UnicodeText, nullable=False)
def __init__(
self,
page: DbPage,
creator_id: UserID,
title: str,
head: str | None,
body: str,
) -> None:
self.page = page
self.creator_id = creator_id
self.title = title
self.head = head
self.body = body
@property
def is_current(self) -> bool:
"""Return `True` if this version is the current version of the
page it belongs to.
"""
return self.id == self.page.current_version.id
class DbCurrentPageVersionAssociation(db.Model):
__tablename__ = 'page_current_versions'
page_id = db.Column(db.Uuid, db.ForeignKey('pages.id'), primary_key=True)
page = db.relationship(
DbPage, backref=db.backref('current_version_association', uselist=False)
)
version_id = db.Column(
db.Uuid, db.ForeignKey('page_versions.id'), unique=True, nullable=False
)
version = db.relationship(DbPageVersion)
def __init__(self, page: DbPage, version: DbPageVersion) -> None:
self.page = page
self.version = version
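

# Usage sketch (illustration only; the page name, language and URL path are
# made-up values). As noted above, a page is expected to have at least one
# version, so a new page is typically created together with its initial
# version and the association that marks that version as the current one.
def _example_create_page(site_id: SiteID, creator_id: UserID):
    page = DbPage(site_id, 'imprint', 'en', '/imprint')
    version = DbPageVersion(page, creator_id, 'Imprint', None, '<p>Imprint</p>')
    current = DbCurrentPageVersionAssociation(page, version)
    return page, version, current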
| [
"[email protected]"
] | |
12ab1ca8d4888e9bc31dde1f5c9c0d2d9e71550c | ebf5c43e530f450d7057823f62cb66fe7013126a | /homebot/modules/ci/projects/aosp/constants.py | 911a51dc039330a86e4c07c4e38bfdf29859f1db | [] | no_license | dinhsan2000/HomeBot1 | ee58ce35fc20522660d024cb454a478cd25a84a4 | a3729d981b2aadeb05fd1da5ed956079ac3105d1 | refs/heads/master | 2023-06-07T16:38:25.256832 | 2023-05-26T04:53:51 | 2023-05-26T04:53:51 | 343,099,642 | 0 | 0 | null | 2023-05-26T04:55:24 | 2021-02-28T12:27:23 | Python | UTF-8 | Python | false | false | 313 | py | ERROR_CODES = {
0: "Build completed successfully",
4: "Build failed: Missing arguments or wrong building path",
5: "Build failed: Lunching failed",
6: "Build failed: Cleaning failed",
7: "Build failed: Building failed"
}
NEEDS_LOGS_UPLOAD = {
5: "lunch_log.txt",
6: "clean_log.txt",
7: "build_log.txt"
}
| [
"[email protected]"
] | |
5e956a4be4f7164ed845671d2ae9afa55c976131 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17s_1_02/brocade_fcoe_ext_rpc/fcoe_get_login/output/__init__.py | 7448cde7e7dce8096c58c337285dce0083bcb99c | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,172 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import fcoe_login_list
class output(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-fcoe-ext - based on the path /brocade_fcoe_ext_rpc/fcoe-get-login/output. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__fcoe_login_list','__fcoe_login_total_logins',)
_yang_name = 'output'
_rest_name = 'output'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__fcoe_login_total_logins = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(0), is_leaf=True, yang_name="fcoe-login-total-logins", rest_name="fcoe-login-total-logins", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='yang:zero-based-counter32', is_config=True)
self.__fcoe_login_list = YANGDynClass(base=YANGListType("fcoe_login_session_mac",fcoe_login_list.fcoe_login_list, yang_name="fcoe-login-list", rest_name="fcoe-login-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='fcoe-login-session-mac', extensions=None), is_container='list', yang_name="fcoe-login-list", rest_name="fcoe-login-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_fcoe_ext_rpc', u'fcoe-get-login', u'output']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'fcoe-get-login', u'output']
def _get_fcoe_login_list(self):
"""
Getter method for fcoe_login_list, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_login/output/fcoe_login_list (list)
YANG Description: This is a list of logged-in FCoE End nodes.
Each row represents a logged-in FCoE device
operational details such as ethernet port number,
the WWN and MAC address of the device that has
logged in and session MAC. The session MAC is used
as the key for this list as it will be unique
for each entry.
"""
return self.__fcoe_login_list
def _set_fcoe_login_list(self, v, load=False):
"""
Setter method for fcoe_login_list, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_login/output/fcoe_login_list (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoe_login_list is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoe_login_list() directly.
YANG Description: This is a list of logged-in FCoE End nodes.
Each row represents a logged-in FCoE device
operational details such as ethernet port number,
the WWN and MAC address of the device that has
logged in and session MAC. The session MAC is used
as the key for this list as it will be unique
for each entry.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("fcoe_login_session_mac",fcoe_login_list.fcoe_login_list, yang_name="fcoe-login-list", rest_name="fcoe-login-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='fcoe-login-session-mac', extensions=None), is_container='list', yang_name="fcoe-login-list", rest_name="fcoe-login-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fcoe_login_list must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("fcoe_login_session_mac",fcoe_login_list.fcoe_login_list, yang_name="fcoe-login-list", rest_name="fcoe-login-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='fcoe-login-session-mac', extensions=None), is_container='list', yang_name="fcoe-login-list", rest_name="fcoe-login-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='list', is_config=True)""",
})
self.__fcoe_login_list = t
if hasattr(self, '_set'):
self._set()
def _unset_fcoe_login_list(self):
self.__fcoe_login_list = YANGDynClass(base=YANGListType("fcoe_login_session_mac",fcoe_login_list.fcoe_login_list, yang_name="fcoe-login-list", rest_name="fcoe-login-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='fcoe-login-session-mac', extensions=None), is_container='list', yang_name="fcoe-login-list", rest_name="fcoe-login-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='list', is_config=True)
def _get_fcoe_login_total_logins(self):
"""
Getter method for fcoe_login_total_logins, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_login/output/fcoe_login_total_logins (yang:zero-based-counter32)
YANG Description: This leaf indicates the Total Number of devices
logged in.
"""
return self.__fcoe_login_total_logins
def _set_fcoe_login_total_logins(self, v, load=False):
"""
Setter method for fcoe_login_total_logins, mapped from YANG variable /brocade_fcoe_ext_rpc/fcoe_get_login/output/fcoe_login_total_logins (yang:zero-based-counter32)
If this variable is read-only (config: false) in the
source YANG file, then _set_fcoe_login_total_logins is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fcoe_login_total_logins() directly.
YANG Description: This leaf indicates the Total Number of devices
logged in.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(0), is_leaf=True, yang_name="fcoe-login-total-logins", rest_name="fcoe-login-total-logins", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='yang:zero-based-counter32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fcoe_login_total_logins must be of a type compatible with yang:zero-based-counter32""",
'defined-type': "yang:zero-based-counter32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(0), is_leaf=True, yang_name="fcoe-login-total-logins", rest_name="fcoe-login-total-logins", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='yang:zero-based-counter32', is_config=True)""",
})
self.__fcoe_login_total_logins = t
if hasattr(self, '_set'):
self._set()
def _unset_fcoe_login_total_logins(self):
self.__fcoe_login_total_logins = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(0), is_leaf=True, yang_name="fcoe-login-total-logins", rest_name="fcoe-login-total-logins", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-fcoe-ext', defining_module='brocade-fcoe-ext', yang_type='yang:zero-based-counter32', is_config=True)
fcoe_login_list = __builtin__.property(_get_fcoe_login_list, _set_fcoe_login_list)
fcoe_login_total_logins = __builtin__.property(_get_fcoe_login_total_logins, _set_fcoe_login_total_logins)
_pyangbind_elements = {'fcoe_login_list': fcoe_login_list, 'fcoe_login_total_logins': fcoe_login_total_logins, }
| [
"[email protected]"
] | |
3980dfc43f8a80ea5205df2679fb75913c1ac86f | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R1/benchmark/startCirq246.py | b790fec452d5179bbb0820b6e56a6a7ce9cf6fef | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,399 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=3
# total number=46
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.rx(-0.09738937226128368).on(input_qubit[2])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=33
c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=34
c.append(cirq.H.on(input_qubit[1])) # number=35
c.append(cirq.H.on(input_qubit[1])) # number=3
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=4
c.append(cirq.Y.on(input_qubit[1])) # number=15
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.H.on(input_qubit[1])) # number=19
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=20
c.append(cirq.rx(-0.6000441968356504).on(input_qubit[1])) # number=28
c.append(cirq.H.on(input_qubit[1])) # number=21
c.append(cirq.H.on(input_qubit[1])) # number=30
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=31
c.append(cirq.H.on(input_qubit[1])) # number=32
c.append(cirq.X.on(input_qubit[1])) # number=23
c.append(cirq.H.on(input_qubit[2])) # number=29
c.append(cirq.H.on(input_qubit[1])) # number=36
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=37
c.append(cirq.H.on(input_qubit[1])) # number=38
c.append(cirq.H.on(input_qubit[1])) # number=43
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=44
c.append(cirq.H.on(input_qubit[1])) # number=45
c.append(cirq.Z.on(input_qubit[1])) # number=11
c.append(cirq.H.on(input_qubit[1])) # number=42
c.append(cirq.H.on(input_qubit[0])) # number=39
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=40
c.append(cirq.H.on(input_qubit[0])) # number=41
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[1])) # number=26
c.append(cirq.Y.on(input_qubit[1])) # number=14
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=5
c.append(cirq.X.on(input_qubit[1])) # number=6
c.append(cirq.Z.on(input_qubit[1])) # number=8
c.append(cirq.X.on(input_qubit[1])) # number=7
c.append(cirq.rx(-2.42845112122491).on(input_qubit[1])) # number=25
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq246.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
] | |
59fae6464fad58ccfcab0782982ac6ed75f7aa20 | e33c95326f6800d435125427a73460a009532a12 | /kotti/tests/test_util.py | 124bd71b4fdf038cd8d2f2aafb5adcc6c6dff055 | [
"BSD-3-Clause-Modification"
] | permissive | stevepiercy/Kotti | 839269f6dc1c45645e5d868b0f17e27bea04b5ac | 45c1627ae9fedbc24d1b817048e153f4d7a2d06d | refs/heads/master | 2021-01-17T21:33:02.795714 | 2012-03-17T22:06:04 | 2012-03-17T22:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,134 | py | from unittest import TestCase
from mock import MagicMock
from pyramid.registry import Registry
from kotti.testing import DummyRequest
from kotti.testing import UnitTestBase
class TestNestedMutationDict(TestCase):
def test_dictwrapper_basics(self):
from kotti.util import NestedMutationDict
data = {}
wrapper = NestedMutationDict(data)
changed = wrapper.changed = MagicMock()
wrapper['name'] = 'andy'
assert data == {'name': 'andy'}
assert wrapper == {'name': 'andy'}
assert wrapper['name'] == 'andy'
assert changed.call_count == 1
wrapper['age'] = 77
assert data == {'name': 'andy', 'age': 77}
assert wrapper['age'] == 77
assert wrapper['name'] == 'andy'
assert changed.call_count == 2
wrapper['age'] += 1
assert data == {'name': 'andy', 'age': 78}
assert wrapper['age'] == 78
assert changed.call_count == 3
def test_listwrapper_basics(self):
from kotti.util import NestedMutationList
data = []
wrapper = NestedMutationList(data)
changed = wrapper.changed = MagicMock()
wrapper.append(5)
assert data == [5]
assert wrapper == [5]
assert wrapper[0] == 5
assert changed.call_count == 1
wrapper.insert(0, 33)
assert data == [33, 5]
assert wrapper[0] == 33
assert changed.call_count == 2
del wrapper[0]
assert data == [5]
assert wrapper[0] == 5
assert changed.call_count == 3
def test_dictwrapper_wraps(self):
from kotti.util import NestedMutationDict
from kotti.util import NestedMutationList
wrapper = NestedMutationDict(
{'name': 'andy', 'age': 77, 'children': []})
changed = wrapper.changed = MagicMock()
wrapper['name'] = 'randy'
assert changed.call_count == 1
assert isinstance(wrapper['children'], NestedMutationList)
wrapper['children'].append({'name': 'sandy', 'age': 33})
assert changed.call_count == 2
assert len(wrapper['children']), 1
assert isinstance(wrapper['children'][0], NestedMutationDict)
def test_listwrapper_wraps(self):
from kotti.util import NestedMutationDict
from kotti.util import NestedMutationList
wrapper = NestedMutationList(
[{'name': 'andy', 'age': 77, 'children': []}])
changed = wrapper.changed = MagicMock()
assert isinstance(wrapper[0], NestedMutationDict)
assert isinstance(wrapper[0]['children'], NestedMutationList)
assert changed.call_count == 0
class TestRequestCache(UnitTestBase):
def setUp(self):
from kotti.util import request_cache
registry = Registry('testing')
request = DummyRequest()
request.registry = registry
super(TestRequestCache, self).setUp(registry=registry, request=request)
self.cache_decorator = request_cache
def test_it(self):
from kotti.util import clear_cache
called = []
@self.cache_decorator(lambda a, b: (a, b))
def my_fun(a, b):
called.append((a, b))
my_fun(1, 2)
my_fun(1, 2)
self.assertEqual(len(called), 1)
my_fun(2, 1)
self.assertEqual(len(called), 2)
clear_cache()
my_fun(1, 2)
self.assertEqual(len(called), 3)
def test_dont_cache(self):
from kotti.util import DontCache
called = []
def dont_cache(a, b):
raise DontCache
@self.cache_decorator(dont_cache)
def my_fun(a, b):
called.append((a, b))
my_fun(1, 2)
my_fun(1, 2)
self.assertEqual(len(called), 2)
class TestLRUCache(TestRequestCache):
def setUp(self):
from kotti.util import lru_cache
super(TestLRUCache, self).setUp()
self.cache_decorator = lru_cache
class TestTitleToName(TestCase):
def test_max_length(self):
from kotti.util import title_to_name
assert len(title_to_name(u'a' * 50)) == 40
| [
"[email protected]"
] | |
0d7d1a2e5a75a423baf17920318f62326ef3922d | e116a28a8e4d07bb4de1812fde957a38155eb6df | /shuidi.py | 9e44887d29f123ec7c6581e5ae7720927105ca78 | [] | no_license | gl-coding/EasyPyEcharts | 5582ddf6be3158f13663778c1038767a87756216 | f9dbe8ad7389a6e2629643c9b7af7b9dc3bfccd5 | refs/heads/master | 2020-09-29T20:48:46.260306 | 2019-12-10T12:52:24 | 2019-12-10T12:52:24 | 227,119,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | #encoding=utf-8
from pyecharts import Liquid
liquid =Liquid("水球图示例")
arg = 1
if arg == 1:
liquid.add("Liquid", [0.6])
liquid.show_config()
liquid.render()
else:
liquid.add("Liquid", [0.6, 0.5, 0.4, 0.3], is_liquid_animation=False, shape='diamond')
liquid.show_config()
liquid.render()
| [
"[email protected]"
] | |
023f6ae6a4a24897cfab217ea9ff439c94ea5592 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=44/params.py | f73081fbfcea69ca72664c1d6db95d6e68538d5e | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.626000',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 44,
'utils': 'uni-medium-3'}
| [
"[email protected]"
] | |
96ab5a91a79d6f425b4e374341b5abea30dd9603 | 715135613fe4030fc265d6ed5903608a2e98811d | /hue/phue.py | d081eb08a6b16c013cbc45fe4a4645192e737710 | [
"MIT"
] | permissive | tanyan-tanyan/Trusty-cogs | f10027350e8a9325ff277519a77f1cd74b1f7692 | bcdf5f22448bb6741db1775c7df4b2f124e2ca66 | refs/heads/master | 2022-11-26T04:05:48.665570 | 2020-07-27T00:49:10 | 2020-07-27T00:49:10 | 282,751,933 | 0 | 1 | MIT | 2020-07-26T23:40:03 | 2020-07-26T23:40:02 | null | UTF-8 | Python | false | false | 63,787 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
phue by Nathanaël Lécaudé - A Philips Hue Python library
Contributions by Marshall Perrin, Justin Lintz
https://github.com/studioimaginaire/phue
Original protocol hacking by rsmck : http://rsmck.co.uk/hue
Published under the MIT license - See LICENSE file for more details.
"Hue Personal Wireless Lighting" is a trademark owned by Koninklijke Philips Electronics N.V., see www.meethue.com for more information.
I am in no way affiliated with the Philips organization.
Slight modification of this PR on phue
https://github.com/andofrjando/phue/tree/remote
'''
import calendar
import hashlib
import json
import logging
import os
import platform
import re
import sys
import socket
from copy import deepcopy
from datetime import datetime, timedelta
if sys.version_info[0] > 2:
PY3K = True
else:
PY3K = False
if PY3K:
import http.client as httplib
from datetime import timezone
from urllib.parse import parse_qs, urlparse, urlencode
UTC = timezone.utc
else:
import httplib
from datetime import tzinfo
from urllib import urlencode
from urlparse import parse_qs, urlparse
class UTC(tzinfo):
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
FileNotFoundError = IOError
input = raw_input
UTC = UTC()
logger = logging.getLogger('phue')
if platform.system() == 'Windows':
USER_HOME = 'USERPROFILE'
else:
USER_HOME = 'HOME'
__version__ = '1.1'
def is_string(data):
"""Utility method to see if data is a string."""
if PY3K:
return isinstance(data, str)
else:
return isinstance(data, str) or isinstance(data, unicode) # noqa
class PhueException(Exception):
def __init__(self, id, message):
self.id = id
self.message = message
class PhueRegistrationException(PhueException):
pass
class PhueRequestTimeout(PhueException):
pass
class Light(object):
""" Hue Light object
Light settings can be accessed or set via the properties of this object.
"""
def __init__(self, bridge, light_id):
self.bridge = bridge
self.light_id = light_id
self._name = None
self._on = None
self._brightness = None
self._colormode = None
self._hue = None
self._saturation = None
self._xy = None
self._colortemp = None
self._effect = None
self._alert = None
self.transitiontime = None # default
self._reset_bri_after_on = None
self._reachable = None
self._type = None
def __repr__(self):
# like default python repr function, but add light name
return '<{0}.{1} object "{2}" at {3}>'.format(
self.__class__.__module__,
self.__class__.__name__,
self.name,
hex(id(self)))
# Wrapper functions for get/set through the bridge, adding support for
# remembering the transitiontime parameter if the user has set it
def _get(self, *args, **kwargs):
return self.bridge.get_light(self.light_id, *args, **kwargs)
def _set(self, *args, **kwargs):
if self.transitiontime is not None:
kwargs['transitiontime'] = self.transitiontime
logger.debug("Setting with transitiontime = {0} ds = {1} s".format(
self.transitiontime, float(self.transitiontime) / 10))
if (args[0] == 'on' and args[1] is False) or (
kwargs.get('on', True) is False):
self._reset_bri_after_on = True
return self.bridge.set_light(self.light_id, *args, **kwargs)
@property
def name(self):
'''Get or set the name of the light [string]'''
if PY3K:
self._name = self._get('name')
else:
self._name = self._get('name').encode('utf-8')
return self._name
@name.setter
def name(self, value):
old_name = self.name
self._name = value
self._set('name', self._name)
logger.debug("Renaming light from '{0}' to '{1}'".format(
old_name, value))
self.bridge.lights_by_name[self.name] = self
del self.bridge.lights_by_name[old_name]
@property
def on(self):
'''Get or set the state of the light [True|False]'''
self._on = self._get('on')
return self._on
@on.setter
def on(self, value):
# Some added code here to work around known bug where
# turning off with transitiontime set makes it restart on brightness = 1
# see
# http://www.everyhue.com/vanilla/discussion/204/bug-with-brightness-when-requesting-ontrue-transitiontime5
# if we're turning off, save whether this bug in the hardware has been
# invoked
if self._on and value is False:
self._reset_bri_after_on = self.transitiontime is not None
if self._reset_bri_after_on:
logger.warning(
'Turned off light with transitiontime specified, brightness will be reset on power on')
self._set('on', value)
# work around bug by resetting brightness after a power on
if self._on is False and value is True:
if self._reset_bri_after_on:
logger.warning(
'Light was turned off with transitiontime specified, brightness needs to be reset now.')
self.brightness = self._brightness
self._reset_bri_after_on = False
self._on = value
@property
def colormode(self):
'''Get the color mode of the light [hs|xy|ct]'''
self._colormode = self._get('colormode')
return self._colormode
@property
def brightness(self):
'''Get or set the brightness of the light [0-254].
0 is not off'''
self._brightness = self._get('bri')
return self._brightness
@brightness.setter
def brightness(self, value):
self._brightness = value
self._set('bri', self._brightness)
@property
def hue(self):
'''Get or set the hue of the light [0-65535]'''
self._hue = self._get('hue')
return self._hue
@hue.setter
def hue(self, value):
self._hue = int(value)
self._set('hue', self._hue)
@property
def saturation(self):
'''Get or set the saturation of the light [0-254]
0 = white
254 = most saturated
'''
self._saturation = self._get('sat')
return self._saturation
@saturation.setter
def saturation(self, value):
self._saturation = value
self._set('sat', self._saturation)
@property
def xy(self):
'''Get or set the color coordinates of the light [ [0.0-1.0, 0.0-1.0] ]
This is in a color space similar to CIE 1931 (but not quite identical)
'''
self._xy = self._get('xy')
return self._xy
@xy.setter
def xy(self, value):
self._xy = value
self._set('xy', self._xy)
@property
def colortemp(self):
'''Get or set the color temperature of the light, in units of mireds [154-500]'''
self._colortemp = self._get('ct')
return self._colortemp
@colortemp.setter
def colortemp(self, value):
if value < 154:
logger.warn('154 mireds is coolest allowed color temp')
elif value > 500:
logger.warn('500 mireds is warmest allowed color temp')
self._colortemp = value
self._set('ct', self._colortemp)
@property
def colortemp_k(self):
'''Get or set the color temperature of the light, in units of Kelvin [2000-6500]'''
self._colortemp = self._get('ct')
return int(round(1e6 / self._colortemp))
@colortemp_k.setter
def colortemp_k(self, value):
if value > 6500:
logger.warn('6500 K is max allowed color temp')
value = 6500
elif value < 2000:
logger.warn('2000 K is min allowed color temp')
value = 2000
colortemp_mireds = int(round(1e6 / value))
logger.debug("{0:d} K is {1} mireds".format(value, colortemp_mireds))
self.colortemp = colortemp_mireds
@property
def effect(self):
'''Check the effect setting of the light. [none|colorloop]'''
self._effect = self._get('effect')
return self._effect
@effect.setter
def effect(self, value):
self._effect = value
self._set('effect', self._effect)
@property
def alert(self):
'''Get or set the alert state of the light [select|lselect|none]'''
self._alert = self._get('alert')
return self._alert
@alert.setter
def alert(self, value):
if value is None:
value = 'none'
self._alert = value
self._set('alert', self._alert)
@property
def reachable(self):
'''Get the reachable state of the light [boolean]'''
self._reachable = self._get('reachable')
return self._reachable
@property
def type(self):
'''Get the type of the light [string]'''
self._type = self._get('type')
return self._type
class SensorState(dict):
def __init__(self, bridge, sensor_id):
self._bridge = bridge
self._sensor_id = sensor_id
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self._bridge.set_sensor_state(self._sensor_id, self)
class SensorConfig(dict):
def __init__(self, bridge, sensor_id):
self._bridge = bridge
self._sensor_id = sensor_id
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self._bridge.set_sensor_config(self._sensor_id, self)
class Sensor(object):
""" Hue Sensor object
Sensor config and state can be read and updated via the properties of this object
"""
def __init__(self, bridge, sensor_id):
self.bridge = bridge
self.sensor_id = sensor_id
self._name = None
self._model = None
self._swversion = None
self._type = None
self._uniqueid = None
self._manufacturername = None
self._state = SensorState(bridge, sensor_id)
self._config = {}
self._recycle = None
def __repr__(self):
# like default python repr function, but add sensor name
return '<{0}.{1} object "{2}" at {3}>'.format(
self.__class__.__module__,
self.__class__.__name__,
self.name,
hex(id(self)))
# Wrapper functions for get/set through the bridge
def _get(self, *args, **kwargs):
return self.bridge.get_sensor(self.sensor_id, *args, **kwargs)
def _set(self, *args, **kwargs):
return self.bridge.set_sensor(self.sensor_id, *args, **kwargs)
@property
def name(self):
'''Get or set the name of the sensor [string]'''
if PY3K:
self._name = self._get('name')
else:
self._name = self._get('name').encode('utf-8')
return self._name
@name.setter
def name(self, value):
old_name = self.name
self._name = value
self._set('name', self._name)
logger.debug("Renaming sensor from '{0}' to '{1}'".format(
old_name, value))
self.bridge.sensors_by_name[self.name] = self
del self.bridge.sensors_by_name[old_name]
@property
def modelid(self):
'''Get a unique identifier of the hardware model of this sensor [string]'''
self._modelid = self._get('modelid')
return self._modelid
@property
def swversion(self):
'''Get the software version identifier of the sensor's firmware [string]'''
self._swversion = self._get('swversion')
return self._swversion
@property
def type(self):
'''Get the sensor type of this device [string]'''
self._type = self._get('type')
return self._type
@property
def uniqueid(self):
'''Get the unique device ID of this sensor [string]'''
self._uniqueid = self._get('uniqueid')
return self._uniqueid
@property
def manufacturername(self):
'''Get the name of the manufacturer [string]'''
self._manufacturername = self._get('manufacturername')
return self._manufacturername
@property
def state(self):
''' A dictionary of sensor state. Some values can be updated, some are read-only. [dict]'''
data = self._get('state')
self._state.clear()
self._state.update(data)
return self._state
@state.setter
def state(self, data):
self._state.clear()
self._state.update(data)
@property
def config(self):
''' A dictionary of sensor config. Some values can be updated, some are read-only. [dict]'''
data = self._get('config')
self._config.clear()
self._config.update(data)
return self._config
@config.setter
def config(self, data):
self._config.clear()
self._config.update(data)
@property
def recycle(self):
''' True if this resource should be automatically removed when the last reference to it disappears [bool]'''
        self._recycle = self._get('recycle')
        return self._recycle
class Group(Light):
""" A group of Hue lights, tracked as a group on the bridge
Example:
>>> b = Bridge()
>>> g1 = Group(b, 1)
>>> g1.hue = 50000 # all lights in that group turn blue
>>> g1.on = False # all will turn off
>>> g2 = Group(b, 'Kitchen') # you can also look up groups by name
>>> # will raise a LookupError if the name doesn't match
"""
def __init__(self, bridge, group_id):
Light.__init__(self, bridge, None)
del self.light_id # not relevant for a group
try:
self.group_id = int(group_id)
except:
name = group_id
groups = bridge.get_group()
for idnumber, info in groups.items():
if PY3K:
if info['name'] == name:
self.group_id = int(idnumber)
break
else:
if info['name'] == name.decode('utf-8'):
self.group_id = int(idnumber)
break
else:
raise LookupError("Could not find a group by that name.")
# Wrapper functions for get/set through the bridge, adding support for
# remembering the transitiontime parameter if the user has set it
def _get(self, *args, **kwargs):
return self.bridge.get_group(self.group_id, *args, **kwargs)
def _set(self, *args, **kwargs):
# let's get basic group functionality working first before adding
# transition time...
if self.transitiontime is not None:
kwargs['transitiontime'] = self.transitiontime
logger.debug("Setting with transitiontime = {0} ds = {1} s".format(
self.transitiontime, float(self.transitiontime) / 10))
if (args[0] == 'on' and args[1] is False) or (
kwargs.get('on', True) is False):
self._reset_bri_after_on = True
return self.bridge.set_group(self.group_id, *args, **kwargs)
@property
def name(self):
'''Get or set the name of the light group [string]'''
if PY3K:
self._name = self._get('name')
else:
self._name = self._get('name').encode('utf-8')
return self._name
@name.setter
def name(self, value):
old_name = self.name
self._name = value
logger.debug("Renaming light group from '{0}' to '{1}'".format(
old_name, value))
self._set('name', self._name)
@property
def lights(self):
""" Return a list of all lights in this group"""
# response = self.bridge.request('GET', '/api/{0}/groups/{1}'.format(self.bridge.username, self.group_id))
# return [Light(self.bridge, int(l)) for l in response['lights']]
return [Light(self.bridge, int(l)) for l in self._get('lights')]
@lights.setter
def lights(self, value):
""" Change the lights that are in this group"""
logger.debug("Setting lights in group {0} to {1}".format(
self.group_id, str(value)))
self._set('lights', value)
class AllLights(Group):
""" All the Hue lights connected to your bridge
This makes use of the semi-documented feature that
"Group 0" of lights appears to be a group automatically
consisting of all lights. This is not returned by
listing the groups, but is accessible if you explicitly
ask for group 0.
"""
def __init__(self, bridge=None):
if bridge is None:
bridge = Bridge()
Group.__init__(self, bridge, 0)
class Scene(object):
""" Container for Scene """
def __init__(self, sid, appdata=None, lastupdated=None,
lights=None, locked=False, name="", owner="",
picture="", recycle=False, version=0):
self.scene_id = sid
self.appdata = appdata or {}
self.lastupdated = lastupdated
if lights is not None:
self.lights = sorted([int(x) for x in lights])
else:
self.lights = []
self.locked = locked
self.name = name
self.owner = owner
self.picture = picture
self.recycle = recycle
self.version = version
def __repr__(self):
# like default python repr function, but add sensor name
return '<{0}.{1} id="{2}" name="{3}" lights={4}>'.format(
self.__class__.__module__,
self.__class__.__name__,
self.scene_id,
self.name,
self.lights)
class Bridge(object):
""" Interface to the Hue ZigBee bridge
You can obtain Light objects by calling the get_light_objects method:
>>> b = Bridge(ip='192.168.1.100')
>>> b.get_light_objects()
[<phue.Light at 0x10473d750>,
<phue.Light at 0x1046ce110>]
Or more succinctly just by accessing this Bridge object as a list or dict:
>>> b[1]
<phue.Light at 0x10473d750>
>>> b['Kitchen']
<phue.Light at 0x10473d750>
"""
def __init__(self, ip=None, username=None, config_file_path=None, api='/api/'):
""" Initialization function.
Parameters:
------------
ip : string
IP address as dotted quad
username : string, optional
"""
if config_file_path is not None:
self.config_file_path = config_file_path
elif os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME), os.W_OK):
self.config_file_path = os.path.join(os.getenv(USER_HOME), '.python_hue')
        elif 'iPad' in platform.machine() or 'iPhone' in platform.machine() or 'iPod' in platform.machine():
self.config_file_path = os.path.join(os.getenv(USER_HOME), 'Documents', '.python_hue')
else:
self.config_file_path = os.path.join(os.getcwd(), '.python_hue')
self.ip = ip
self.username = username
self.lights_by_id = {}
self.lights_by_name = {}
self.sensors_by_id = {}
self.sensors_by_name = {}
self._name = None
self.api = api
# self.minutes = 600 # these do not seem to be used anywhere?
# self.seconds = 10
self.connect()
@property
def name(self):
'''Get or set the name of the bridge [string]'''
self._name = self.request(
'GET', self.api + self.username + '/config')['name']
return self._name
@name.setter
def name(self, value):
self._name = value
data = {'name': self._name}
self.request(
'PUT', self.api + self.username + '/config', data)
def request(self, mode='GET', address=None, data=None):
""" Utility function for HTTP GET/PUT requests for the API"""
if hasattr(self, 'token'):
connection = httplib.HTTPSConnection(self.ip, timeout=20)
headers = {
'Content-Type': 'application/json',
'Authorization': self.token.bearer()
}
else:
connection = httplib.HTTPConnection(self.ip, timeout=10)
headers = {}
try:
if mode == 'GET' or mode == 'DELETE':
connection.request(mode, address, None, headers)
if mode == 'PUT' or mode == 'POST':
connection.request(mode, address, json.dumps(data), headers)
logger.debug("{0} {1} {2} {3}".format(mode, address,
str(data), str(headers)))
except socket.timeout:
error = "{} Request to {}{} timed out.".format(mode, self.ip, address)
logger.exception(error)
raise PhueRequestTimeout(None, error)
result = connection.getresponse()
response = result.read()
connection.close()
if PY3K:
return json.loads(response.decode('utf-8'))
else:
logger.debug(response)
return json.loads(response)
def get_ip_address(self, set_result=False):
""" Get the bridge ip address from the meethue.com nupnp api """
connection = httplib.HTTPSConnection('www.meethue.com')
connection.request('GET', '/api/nupnp')
logger.info('Connecting to meethue.com/api/nupnp')
result = connection.getresponse()
if PY3K:
data = json.loads(str(result.read(), encoding='utf-8'))
else:
result_str = result.read()
data = json.loads(result_str)
""" close connection after read() is done, to prevent issues with read() """
connection.close()
ip = str(data[0]['internalipaddress'])
        if ip != '':
if set_result:
self.ip = ip
return ip
else:
return False
def register_app(self):
""" Register this computer with the Hue bridge hardware and save the resulting access token """
registration_request = {"devicetype": "python_hue"}
response = self.request('POST', self.api, registration_request)
for line in response:
for key in line:
if 'success' in key:
with open(self.config_file_path, 'w') as f:
logger.info(
'Writing configuration file to ' + self.config_file_path)
f.write(json.dumps({self.ip: line['success']}))
logger.info('Reconnecting to the bridge')
self.connect()
if 'error' in key:
error_type = line['error']['type']
if error_type == 101:
raise PhueRegistrationException(error_type,
'The link button has not been pressed in the last 30 seconds.')
if error_type == 7:
raise PhueException(error_type,
'Unknown username')
def connect(self):
""" Connect to the Hue bridge """
logger.info('Attempting to connect to the bridge...')
# If the ip and username were provided at class init
if self.ip is not None and self.username is not None:
logger.info('Using ip: ' + self.ip)
logger.info('Using username: ' + self.username)
return
if self.ip is None or self.username is None:
try:
with open(self.config_file_path) as f:
config = json.loads(f.read())
if self.ip is None:
self.ip = list(config.keys())[0]
logger.info('Using ip from config: ' + self.ip)
else:
logger.info('Using ip: ' + self.ip)
if self.username is None:
self.username = config[self.ip]['username']
logger.info(
'Using username from config: ' + self.username)
else:
logger.info('Using username: ' + self.username)
except Exception as e:
logger.info(
'Error opening config file, will attempt bridge registration')
self.register_app()
def get_light_id_by_name(self, name):
""" Lookup a light id based on string name. Case-sensitive. """
lights = self.get_light()
for light_id in lights:
if PY3K:
if name == lights[light_id]['name']:
return light_id
else:
if name.decode('utf-8') == lights[light_id]['name']:
return light_id
return False
def get_light_objects(self, mode='list'):
"""Returns a collection containing the lights, either by name or id (use 'id' or 'name' as the mode)
The returned collection can be either a list (default), or a dict.
Set mode='id' for a dict by light ID, or mode='name' for a dict by light name. """
if self.lights_by_id == {}:
lights = self.request('GET', self.api + self.username + '/lights/')
for light in lights:
self.lights_by_id[int(light)] = Light(self, int(light))
self.lights_by_name[lights[light][
'name']] = self.lights_by_id[int(light)]
if mode == 'id':
return self.lights_by_id
if mode == 'name':
return self.lights_by_name
if mode == 'list':
            # return lights in sorted id order, dicts have no natural order
return [self.lights_by_id[id] for id in sorted(self.lights_by_id)]
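    # Illustrative sketch (not executed): the three modes above can be used as
    # follows, assuming a registered Bridge instance `b`; the IDs and the
    # 'Kitchen' name are hypothetical examples, not values from a real bridge.
    #   >>> b.get_light_objects('id')[1].brightness = 127
    #   >>> b.get_light_objects('name')['Kitchen'].on = True
    #   >>> [light.name for light in b.get_light_objects('list')]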
def get_sensor_id_by_name(self, name):
""" Lookup a sensor id based on string name. Case-sensitive. """
sensors = self.get_sensor()
for sensor_id in sensors:
if PY3K:
if name == sensors[sensor_id]['name']:
return sensor_id
else:
if name.decode('utf-8') == sensors[sensor_id]['name']:
return sensor_id
return False
def get_sensor_objects(self, mode='list'):
"""Returns a collection containing the sensors, either by name or id (use 'id' or 'name' as the mode)
The returned collection can be either a list (default), or a dict.
Set mode='id' for a dict by sensor ID, or mode='name' for a dict by sensor name. """
if self.sensors_by_id == {}:
sensors = self.request('GET', self.api + self.username + '/sensors/')
for sensor in sensors:
self.sensors_by_id[int(sensor)] = Sensor(self, int(sensor))
self.sensors_by_name[sensors[sensor][
'name']] = self.sensors_by_id[int(sensor)]
if mode == 'id':
return self.sensors_by_id
if mode == 'name':
return self.sensors_by_name
if mode == 'list':
return self.sensors_by_id.values()
def __getitem__(self, key):
""" Lights are accessibly by indexing the bridge either with
an integer index or string name. """
if self.lights_by_id == {}:
self.get_light_objects()
try:
return self.lights_by_id[key]
except:
try:
if PY3K:
return self.lights_by_name[key]
else:
return self.lights_by_name[key.decode('utf-8')]
except:
raise KeyError(
'Not a valid key (integer index starting with 1, or light name): ' + str(key))
@property
def lights(self):
""" Access lights as a list """
return self.get_light_objects()
def get_api(self):
""" Returns the full api dictionary """
return self.request('GET', self.api + self.username)
def get_light(self, light_id=None, parameter=None):
""" Gets state by light_id and parameter"""
if is_string(light_id):
light_id = self.get_light_id_by_name(light_id)
if light_id is None:
return self.request('GET', self.api + self.username + '/lights/')
state = self.request(
'GET', self.api + self.username + '/lights/' + str(light_id))
if parameter is None:
return state
if parameter in ['name', 'type', 'uniqueid', 'swversion']:
return state[parameter]
else:
try:
return state['state'][parameter]
except KeyError as e:
raise KeyError(
'Not a valid key, parameter %s is not associated with light %s)'
% (parameter, light_id))
def set_light(self, light_id, parameter, value=None, transitiontime=None):
""" Adjust properties of one or more lights.
light_id can be a single lamp or an array of lamps
parameters: 'on' : True|False , 'bri' : 0-254, 'sat' : 0-254, 'ct': 154-500
transitiontime : in **deciseconds**, time for this transition to take place
Note that transitiontime only applies to *this* light
command, it is not saved as a setting for use in the future!
Use the Light class' transitiontime attribute if you want
persistent time settings.
"""
if isinstance(parameter, dict):
data = parameter
else:
data = {parameter: value}
if transitiontime is not None:
data['transitiontime'] = int(round(
transitiontime)) # must be int for request format
light_id_array = light_id
if isinstance(light_id, int) or is_string(light_id):
light_id_array = [light_id]
result = []
for light in light_id_array:
logger.debug(str(data))
if parameter == 'name':
result.append(self.request('PUT', self.api + self.username + '/lights/' + str(
light_id), data))
else:
if is_string(light):
converted_light = self.get_light_id_by_name(light)
else:
converted_light = light
result.append(self.request('PUT', self.api + self.username + '/lights/' + str(
converted_light) + '/state', data))
if 'error' in list(result[-1][0].keys()):
logger.warn("ERROR: {0} for light {1}".format(
result[-1][0]['error']['description'], light))
logger.debug(result)
return result
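    # Illustrative sketch (not executed), assuming a registered Bridge `b`;
    # the light IDs, the 'Kitchen' name and the values are hypothetical:
    #   >>> b.set_light(1, 'bri', 254)
    #   >>> b.set_light([1, 2], 'on', True)
    #   >>> b.set_light('Kitchen', {'on': True, 'bri': 127}, transitiontime=50)
    # transitiontime is in deciseconds and only applies to this call, as noted
    # in the docstring above.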
# Sensors #####
@property
def sensors(self):
""" Access sensors as a list """
return self.get_sensor_objects()
def create_sensor(self, name, modelid, swversion, sensor_type, uniqueid, manufacturername, state={}, config={}, recycle=False):
""" Create a new sensor in the bridge. Returns (ID,None) of the new sensor or (None,message) if creation failed. """
data = {
"name": name,
"modelid": modelid,
"swversion": swversion,
"type": sensor_type,
"uniqueid": uniqueid,
"manufacturername": manufacturername,
"recycle": recycle
}
if (isinstance(state, dict) and state != {}):
data["state"] = state
if (isinstance(config, dict) and config != {}):
data["config"] = config
result = self.request('POST', self.api + self.username + '/sensors/', data)
if ("success" in result[0].keys()):
new_id = result[0]["success"]["id"]
logger.debug("Created sensor with ID " + new_id)
new_sensor = Sensor(self, int(new_id))
self.sensors_by_id[new_id] = new_sensor
self.sensors_by_name[name] = new_sensor
return new_id, None
else:
logger.debug("Failed to create sensor:" + repr(result[0]))
return None, result[0]
def get_sensor(self, sensor_id=None, parameter=None):
""" Gets state by sensor_id and parameter"""
if is_string(sensor_id):
sensor_id = self.get_sensor_id_by_name(sensor_id)
if sensor_id is None:
return self.request('GET', self.api + self.username + '/sensors/')
data = self.request(
'GET', self.api + self.username + '/sensors/' + str(sensor_id))
if isinstance(data, list):
logger.debug("Unable to read sensor with ID {0}: {1}".format(sensor_id, repr(data)))
return None
if parameter is None:
return data
return data[parameter]
def set_sensor(self, sensor_id, parameter, value=None):
""" Adjust properties of a sensor
sensor_id must be a single sensor.
parameters: 'name' : string
"""
if isinstance(parameter, dict):
data = parameter
else:
data = {parameter: value}
result = None
logger.debug(str(data))
result = self.request('PUT', self.api + self.username + '/sensors/' + str(
sensor_id), data)
if 'error' in list(result[0].keys()):
logger.warn("ERROR: {0} for sensor {1}".format(
result[0]['error']['description'], sensor_id))
logger.debug(result)
return result
def set_sensor_state(self, sensor_id, parameter, value=None):
""" Adjust the "state" object of a sensor
sensor_id must be a single sensor.
parameters: any parameter(s) present in the sensor's "state" dictionary.
"""
self.set_sensor_content(sensor_id, parameter, value, "state")
def set_sensor_config(self, sensor_id, parameter, value=None):
""" Adjust the "config" object of a sensor
sensor_id must be a single sensor.
parameters: any parameter(s) present in the sensor's "config" dictionary.
"""
self.set_sensor_content(sensor_id, parameter, value, "config")
def set_sensor_content(self, sensor_id, parameter, value=None, structure="state"):
""" Adjust the "state" or "config" structures of a sensor
"""
if (structure != "state" and structure != "config"):
            logger.debug("set_sensor_content expects structure 'state' or 'config'.")
return False
if isinstance(parameter, dict):
data = parameter.copy()
else:
data = {parameter: value}
# Attempting to set this causes an error.
if "lastupdated" in data:
del data["lastupdated"]
result = None
logger.debug(str(data))
result = self.request('PUT', self.api + self.username + '/sensors/' + str(
sensor_id) + "/" + structure, data)
if 'error' in list(result[0].keys()):
logger.warn("ERROR: {0} for sensor {1}".format(
result[0]['error']['description'], sensor_id))
logger.debug(result)
return result
def delete_scene(self, scene_id):
try:
return self.request('DELETE', self.api + self.username + '/scenes/' + str(scene_id))
except:
logger.debug("Unable to delete scene with ID {0}".format(scene_id))
def delete_sensor(self, sensor_id):
try:
name = self.sensors_by_id[sensor_id].name
del self.sensors_by_name[name]
del self.sensors_by_id[sensor_id]
return self.request('DELETE', self.api + self.username + '/sensors/' + str(sensor_id))
except:
logger.debug("Unable to delete nonexistent sensor with ID {0}".format(sensor_id))
# Groups of lights #####
@property
def groups(self):
""" Access groups as a list """
return [Group(self, int(groupid)) for groupid in self.get_group().keys()]
def get_group_id_by_name(self, name):
""" Lookup a group id based on string name. Case-sensitive. """
groups = self.get_group()
for group_id in groups:
if PY3K:
if name == groups[group_id]['name']:
return group_id
else:
if name.decode('utf-8') == groups[group_id]['name']:
return group_id
return False
def get_group(self, group_id=None, parameter=None):
if is_string(group_id):
group_id = self.get_group_id_by_name(group_id)
if group_id is False:
            logger.error('Group name does not exist')
return
if group_id is None:
return self.request('GET', self.api + self.username + '/groups/')
if parameter is None:
return self.request('GET', self.api + self.username + '/groups/' + str(group_id))
elif parameter == 'name' or parameter == 'lights':
return self.request('GET', self.api + self.username + '/groups/' + str(group_id))[parameter]
else:
return self.request('GET', self.api + self.username + '/groups/' + str(group_id))['action'][parameter]
def set_group(self, group_id, parameter, value=None, transitiontime=None):
""" Change light settings for a group
group_id : int, id number for group
parameter : 'name' or 'lights'
value: string, or list of light IDs if you're setting the lights
"""
if isinstance(parameter, dict):
data = parameter
elif parameter == 'lights' and (isinstance(value, list) or isinstance(value, int)):
if isinstance(value, int):
value = [value]
data = {parameter: [str(x) for x in value]}
else:
data = {parameter: value}
if transitiontime is not None:
data['transitiontime'] = int(round(
transitiontime)) # must be int for request format
group_id_array = group_id
if isinstance(group_id, int) or is_string(group_id):
group_id_array = [group_id]
result = []
for group in group_id_array:
logger.debug(str(data))
if is_string(group):
converted_group = self.get_group_id_by_name(group)
else:
converted_group = group
if converted_group is False:
                logger.error('Group name does not exist')
return
if parameter == 'name' or parameter == 'lights':
result.append(self.request('PUT', self.api + self.username + '/groups/' + str(converted_group), data))
else:
result.append(self.request('PUT', self.api + self.username + '/groups/' + str(converted_group) + '/action', data))
if 'error' in list(result[-1][0].keys()):
logger.warn("ERROR: {0} for group {1}".format(
result[-1][0]['error']['description'], group))
logger.debug(result)
return result
def create_group(self, name, lights=None):
""" Create a group of lights
Parameters
------------
name : string
Name for this group of lights
lights : list
List of lights to be in the group.
"""
data = {'lights': [str(x) for x in lights], 'name': name}
return self.request('POST', self.api + self.username + '/groups/', data)
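    # Illustrative sketch (not executed), assuming a registered Bridge `b`;
    # the group name and light IDs are hypothetical:
    #   >>> b.create_group('Living room', [1, 2, 3])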
def delete_group(self, group_id):
return self.request('DELETE', self.api + self.username + '/groups/' + str(group_id))
# Scenes #####
@property
def scenes(self):
return [Scene(k, **v) for k, v in self.get_scene().items()]
def get_scene(self):
return self.request('GET', self.api + self.username + '/scenes')
def activate_scene(self, group_id, scene_id):
return self.request('PUT', self.api + self.username + '/groups/' +
str(group_id) + '/action',
{"scene": scene_id})
def run_scene(self, group_name, scene_name):
"""Run a scene by group and scene name.
        As of 1.11 of the Hue API the scenes are accessible in the
        API. With the gen 2 of the official HUE app everything is
        organized by room groups.
        This provides a convenient way of activating scenes by group
        name and scene name. If we find exactly 1 group and 1 scene
        with the matching names, we run them.
        If we find more than one we run the first scene that has
        exactly the same lights defined as the group. This is far from
perfect, but is convenient for setting lights symbolically (and
can be improved later).
:returns True if a scene was run, False otherwise
"""
groups = [x for x in self.groups if x.name == group_name]
scenes = [x for x in self.scenes if x.name == scene_name]
if len(groups) != 1:
logger.warn("run_scene: More than 1 group found by name {}".format(group_name))
return False
group = groups[0]
if len(scenes) == 0:
logger.warn("run_scene: No scene found {}".format(scene_name))
return False
if len(scenes) == 1:
self.activate_scene(group.group_id, scenes[0].scene_id)
return True
# otherwise, lets figure out if one of the named scenes uses
# all the lights of the group
group_lights = sorted([x.light_id for x in group.lights])
for scene in scenes:
if group_lights == scene.lights:
self.activate_scene(group.group_id, scene.scene_id)
return True
logger.warn("run_scene: did not find a scene: {} "
"that shared lights with group {}".format(scene_name, group_name))
return False
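    # Illustrative sketch (not executed), assuming a registered Bridge `b` and
    # hypothetical group/scene names created in the official Hue app:
    #   >>> b.run_scene('Living room', 'Relax')
    # Returns True if a matching group/scene pair was activated, False otherwise.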
# Schedules #####
def get_schedule(self, schedule_id=None, parameter=None):
if schedule_id is None:
return self.request('GET', self.api + self.username + '/schedules')
if parameter is None:
return self.request('GET', self.api + self.username + '/schedules/' + str(schedule_id))
def create_schedule(self, name, time, light_id, data, description=' '):
schedule = {
'name': name,
'localtime': time,
'description': description,
'command':
{
'method': 'PUT',
'address': (self.api + self.username +
'/lights/' + str(light_id) + '/state'),
'body': data
}
}
return self.request('POST', self.api + self.username + '/schedules', schedule)
def set_schedule_attributes(self, schedule_id, attributes):
"""
:param schedule_id: The ID of the schedule
:param attributes: Dictionary with attributes and their new values
"""
return self.request('PUT', self.api + self.username + '/schedules/' + str(schedule_id), data=attributes)
def create_group_schedule(self, name, time, group_id, data, description=' '):
schedule = {
'name': name,
'localtime': time,
'description': description,
'command':
{
'method': 'PUT',
'address': (self.api + self.username +
'/groups/' + str(group_id) + '/action'),
'body': data
}
}
return self.request('POST', self.api + self.username + '/schedules', schedule)
def delete_schedule(self, schedule_id):
return self.request('DELETE', self.api + self.username + '/schedules/' + str(schedule_id))
class PhueAuthorisationError(PhueException):
pass
class PhueTokenExpired(PhueException):
pass
class PhueInvalidToken(PhueException):
pass
class RemoteBridge(Bridge):
def __init__(self, username=None, config_file_path=None, token_path=None):
if config_file_path is None:
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME), os.W_OK):
config_file_path = os.path.join(os.getenv(USER_HOME), '.python_hue_remote')
            elif 'iPad' in platform.machine() or 'iPhone' in platform.machine() or 'iPod' in platform.machine():
config_file_path = os.path.join(os.getenv(USER_HOME), 'Documents', '.python_hue_remote')
else:
config_file_path = os.path.join(os.getcwd(), '.python_hue_remote')
self.token = self.get_token(token_path)
super(RemoteBridge, self).__init__(
ip='api.meethue.com', username=username,
config_file_path=config_file_path, api='/bridge/')
def get_token(self, token_path=None):
if token_path is None:
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME), os.W_OK):
token_path = os.path.join(os.getenv(USER_HOME), '.python_hue_token')
            elif 'iPad' in platform.machine() or 'iPhone' in platform.machine() or 'iPod' in platform.machine():
token_path = os.path.join(os.getenv(USER_HOME), 'Documents', '.python_hue_token')
else:
token_path = os.path.join(os.getcwd(), '.python_hue_token')
return RemoteToken(load=token_path)
def press_link_button(self):
logger.info('Pressing remote virtual link button..')
data = {'linkbutton': True}
connection = httplib.HTTPSConnection(self.ip, timeout=10)
headers = {
'Content-Type': 'application/json',
'Authorization': self.token.bearer()
}
connection.request('PUT', '{}{}'.format(self.api, '0/config'),
json.dumps(data), headers)
result = connection.getresponse()
logger.debug('Response from API: {} {}'.format(
result.status, result.reason))
if result.status != 200:
raise PhueRegistrationException(result.status, result.read())
def get_ip_address(self, *args, **kwargs):
raise NotImplementedError
def register_app(self, *args, **kwargs):
self.press_link_button()
super(RemoteBridge, self).register_app(*args, **kwargs)
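# Illustrative sketch (not executed): once a token has been generated and saved
# with RemoteToken (see below, or the --token CLI at the bottom of this file),
# a RemoteBridge behaves like a local Bridge. The username and path shown here
# are hypothetical placeholders.
#   >>> rb = RemoteBridge(username='remote-whitelist-id',
#   ...                   token_path='/home/user/.python_hue_token')
#   >>> rb.get_light()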
class RemoteToken(object):
""" A Python Class to interact with the Philips Hue remote access API to
manage the associated OAuth2 tokens.
You will need to provide your own account credentials from
https://developers.meethue.com/user/me/apps
"""
API = 'api.meethue.com'
ENDPOINT_AUTH = '/oauth2/auth'
ENDPOINT_TOKEN = '/oauth2/token'
ENDPOINT_REFRESH = '/oauth2/refresh'
def __init__(self, clientid=None, clientsecret=None, appid=None,
saveto=None, load=None):
""" A new token object requires your own credentials from
https://developers.meethue.com/user/me/apps
You will be required to visit a URL (generated by this object) and
paste in the callback URL that is created after authorisation through
the web browser is complete. The required information from that URL
        will be parsed by the `__authorise__()` method.
        When registering, you can use whatever you want for the callback URL
(it doesn't even need to be a valid URL).
Args:
clientid (str): The clientid you obtain from Hue when registering
for the Hue Remote API.
clientsecret (str): The clientsecret you have received from Hue
when registering for the Hue Remote API.
appid (str): The App name you specified when registering for the Hue
Remote API.
saveto (str optional): The file to save details to so they can be
reloaded at a later time.
load (str optional): If specified, load token data from the path
instead of attempting to authorise a new one. This will override
the remaining attributes, as they are filled from the file
contents instead.
"""
if load:
self.load(load)
elif (clientid and clientsecret and appid):
self.clientid = clientid
self.clientsecret = clientsecret
self.appid = appid
self.saveto = saveto
# self.state = binascii.b2a_hex(os.urandom(16))
self.__get_auth_url__()
else:
            raise ValueError('Missing required arguments clientid, clientsecret and appid')
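    # Illustrative sketch (not executed); the clientid/clientsecret/appid come
    # from https://developers.meethue.com/ and all values below are placeholders:
    #   >>> token = RemoteToken('my-clientid', 'my-clientsecret', 'my-appid',
    #   ...                     saveto='/home/user/.python_hue_token')
    #   >>> token.__get_auth_url__()            # open the returned URL in a browser
    #   >>> token.__authorise__(callback_url)   # paste the callback URL you were sent to
    #   >>> RemoteToken(load='/home/user/.python_hue_token')  # reload it later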
def __get_auth_url__(self):
URL = 'https://{}{}?clientid={}&appid={}&deviceid=phuepy&devicename=phuepy&response_type=code'.format(
self.API, self.ENDPOINT_AUTH, self.clientid, self.appid)
return URL
def __authorise__(self, resp):
""" Obtains new access and refresh tokens from the Philips Hue API
This method is intended to be called from the `__init__` method.
Returns:
None
"""
parsed = urlparse(resp)
code = parse_qs(parsed.query)['code'][0]
        # Use the `code` from the callback URL to obtain a nonce from the API
params = urlencode({'code': code, 'grant_type': 'authorization_code'})
www_authenticate = self.__get_nonce__(self.ENDPOINT_TOKEN, params)
# Now exchange the `nonce` and `code` for our tokens.
headers = {'Authorization': self.__digest__(
self.ENDPOINT_TOKEN, www_authenticate)}
connection = httplib.HTTPSConnection(self.API, timeout=20)
connection.request('POST',
self.ENDPOINT_TOKEN + '?' + params,
None,
headers)
self.__parse_token_json__(connection.getresponse())
connection.close()
    def __get_nonce__(self, url, params):
        """ Obtains a nonce from the Philips Hue API to be used in the
digest calculations.
Returns a dict containing the `realm` and `nonce` from the
`WWW-Authenticate` header.
"""
connection = httplib.HTTPSConnection(self.API, timeout=20)
connection.request('POST', url + '?' + params)
response = connection.getresponse()
www_authenticate = response.getheader('WWW-Authenticate')
# Use a regex to parse the string contained in the `WWW-Authenticate`
# header and obtain the `realm` and `nonce` values.
# Example header: WWW-Authenticate: Digest realm="[email protected]", nonce="7b6e45de18ac4ee452ee0a0de91dbb10"
reg = re.compile(r'(\w+)[:=][\s"]?([^",]+)"?')
www_authenticate = dict(reg.findall(www_authenticate))
logger.debug('Obtained nonce: {}, realm: {}'
.format(www_authenticate['nonce'],
www_authenticate['realm']))
connection.close()
return www_authenticate
def __parse_token_json__(self, resp):
""" Parses the JSON string from the Philips Hue API that is received
when obtaining a new token or refreshing the tokens.
        The expiry time is calculated and recorded in UTC in the
`access_token_exp` and `refresh_token_exp` attributes.
"""
logger.debug('Response from API: {} {}'.format(resp.status, resp.reason))
token = json.loads(resp.read())
logger.debug('Text from API: {}'.format(token))
if resp.status == 200:
if token['access_token']: # All values will be blank if failed due to query error.
self.access_token = token['access_token']
self.refresh_token = token['refresh_token']
self.access_token_exp = datetime.utcnow() + timedelta(
seconds=(int(token['access_token_expires_in'])))
self.refresh_token_exp = datetime.utcnow() + timedelta(
seconds=(int(token['refresh_token_expires_in'])))
self.access_token_exp = self.access_token_exp.replace(
tzinfo=UTC)
self.refresh_token_exp = self.refresh_token_exp.replace(
tzinfo=UTC)
if self.saveto:
self.__save__()
else:
raise PhueAuthorisationError(None,
'Unable to obtain tokens from API')
else:
raise PhueAuthorisationError(token['ErrorCode'], token['Error'])
def __utc_to_local__(self, utc_dt):
""" Converts a UTC datetime object to an unaware datetime object in
local time
"""
timestamp = calendar.timegm(utc_dt.timetuple())
local_dt = datetime.fromtimestamp(timestamp)
return local_dt.replace(microsecond=utc_dt.microsecond)
def __digest__(self, url, www_authenticate, verb='POST'):
""" Returns an Authorization header that includes the digest hash
Args:
url (str): The API endpoint the request will be sent to
www_authenticate (dict): The nonce and realm from the initial
                challenge
verb (str optional): The HTTP request type (default POST)
Returns:
header: The Authorization header
"""
part1 = ':'.join([
self.clientid,
www_authenticate['realm'],
self.clientsecret,
])
part3 = ':'.join([verb, url])
if PY3K:
part1 = part1.encode()
part3 = part3.encode()
digest_plain = ':'.join([
hashlib.md5(part1).hexdigest(),
www_authenticate['nonce'],
hashlib.md5(part3).hexdigest(),
])
if PY3K:
digest_plain = digest_plain.encode()
return ('Digest username="{}", realm="{}", nonce="{}", uri="{}", response="{}"'
.format(
self.clientid,
www_authenticate['realm'],
www_authenticate['nonce'],
url,
hashlib.md5(digest_plain).hexdigest()
)
)
@property
def access_expires(self):
""" Returns a human friendly expiry time in local time.
"""
return self.__utc_to_local__(self.access_token_exp).strftime('%c')
@property
def refresh_expires(self):
""" Returns a human friendly expiry time in local time.
"""
return self.__utc_to_local__(self.refresh_token_exp).strftime('%c')
@property
def refresh_required(self):
""" Returns a boolean, `True` if the access token has expired (or will
expire in the next 30 minutes) and can be refreshed,
`False` if the access token is still valid.
"""
if (self.access_token_exp - timedelta(minutes=30)) > datetime.now(UTC):
return False
else:
return True
@property
def valid(self):
""" Returns a boolean, `True` if the access or refresh tokens
are still valid, `False` if they are both expired. If `False`, then a
new `Token` object will need to be created.
"""
if (self.refresh_token_exp) > datetime.now(UTC):
return True
else:
return False
def __save__(self, saveto=None):
""" Save the token data so it can be loaded later. This is meant to be
a private method. For normal interaction, use the `save()` method, not
this `__save__()` method.
        Saving is achieved by serialising `__dict__` into JSON, removing
`self.saveto` from the JSON string and writing it to the location
specified in in the `saveto` parameter (if specified), or the location
in `self.saveto`. If both `saveto` and `self.saveto` are specified, the
        method parameter will take precedence, and will update `self.saveto`
to match the new location. If none are specified, an `AttributeError`
is raised.
        `saveto` is a string representing a file path.
Args:
saveto (str, optional): The file to save details to,
overriding the existing save location (if it is set)
Returns:
None
"""
if saveto:
self.saveto = saveto
if self.saveto:
current_state = deepcopy(self.__dict__)
current_state.pop('saveto')
current_state['access_token_exp'] = (current_state['access_token_exp']
.replace(tzinfo=None).isoformat())
current_state['refresh_token_exp'] = (current_state['refresh_token_exp']
.replace(tzinfo=None).isoformat())
with open(self.saveto, 'w') as f:
text = json.dumps(current_state)
logger.debug('Saving text to token file: {}'.format(text))
f.write(text)
else:
raise AttributeError('No save location is defined.')
def save(self, saveto):
""" A public method to change (or set) the location for the token file.
The positional parameter `saveto` is required.
If the token file location is updated using this method, the old token
file will remain on the file system, but it's details will become
invalid by forcing an update of the access and refresh tokens in the new
token file, rendering the old ones useless.
Args:
saveto (str): The path of a file to save details to.
"""
self.__save__(saveto=saveto)
self.refresh(force=True)
def load(self, loadfrom):
""" Loads attributes from a JSON string stored in a file written by the
save method.
Args:
loadfrom (str): A path to the file to load details from
"""
try:
with open(loadfrom, 'r') as f:
text = f.read()
current_state = json.loads(text)
logger.debug('Loading token from file: {}'.format(text))
except FileNotFoundError:
raise PhueInvalidToken(None, 'No token exists yet. Generate one first')
try:
self.saveto = loadfrom
self.clientid = current_state['clientid']
self.clientsecret = current_state['clientsecret']
self.appid = current_state['appid']
self.access_token = current_state['access_token']
self.refresh_token = current_state['refresh_token']
self.access_token_exp = datetime.strptime(
current_state['access_token_exp'],
'%Y-%m-%dT%H:%M:%S.%f').replace(tzinfo=UTC)
self.refresh_token_exp = datetime.strptime(
current_state['refresh_token_exp'],
'%Y-%m-%dT%H:%M:%S.%f').replace(tzinfo=UTC)
except KeyError:
raise PhueInvalidToken(None, 'This token file is corrupt or invalid')
self.refresh()
def refresh(self, force=False):
""" Refreshes the current tokens.
If the access token is still valid (i.e. doesn't expire in the next 30
minutes), then the tokens won't be refreshed unless the `force`
parameter is `True`.
If the saveto attribute has a value, then the new token details
are also saved to the file.
Args:
force (bool optional): Refresh the token, even if it hasn't expired
Returns:
True if refreshed, False if not refreshed.
"""
if self.valid:
if force or self.refresh_required:
params = urlencode({'grant_type': 'refresh_token'})
www_authenticate = self.__get_nonce__(
self.ENDPOINT_REFRESH, params)
data = urlencode({'refresh_token': self.refresh_token})
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': self.__digest__(self.ENDPOINT_REFRESH,
www_authenticate)
}
connection = httplib.HTTPSConnection(self.API, timeout=20)
connection.request('POST',
self.ENDPOINT_REFRESH + '?' + params,
data,
headers)
self.__parse_token_json__(connection.getresponse())
logger.info('Token refreshed. Access expires: {}, Refresh Expires: {}'
.format(self.access_token_exp.isoformat(),
self.refresh_token_exp.isoformat()))
connection.close()
return True
else:
logger.info('Refresh not required.')
return False
else:
msg = 'This token has expired. Please generate a new token.'
logger.exception(msg)
raise PhueTokenExpired(None, msg)
    def bearer(self):
        """ A convenience method to get the current access token in a format to
use in an Authorization header
If the access token needs refreshing, this method will refresh it first,
then return the updated access token.
Args:
None
Returns:
token (str): A valid Bearer token to use in the Authorization header
when accessing the Philips Hue API
"""
if self.valid:
self.refresh(force=False)
return 'Bearer {}'.format(self.access_token)
else:
msg = 'This token has expired. Please generate a new token.'
logger.exception(msg)
raise PhueTokenExpired(None, msg)
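# Usage sketch (illustrative only, not executed): ``token`` is assumed to be a
# RemoteToken instance created as in the __main__ block below; load() restores a
# previously saved token file (the path shown is hypothetical) and bearer()
# yields the value for an Authorization header.
#
#     token.load('/home/user/.python_hue_token')
#     headers = {'Authorization': token.bearer()}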
if __name__ == '__main__':
import argparse
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('--config-file-path', required=False)
group = parser.add_mutually_exclusive_group()
group.add_argument('--token', action='store_true')
group.add_argument('--host')
args = parser.parse_args()
if args.token:
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME), os.W_OK):
token_path = os.path.join(os.getenv(USER_HOME), '.python_hue_token')
        elif 'iPad' in platform.machine() or 'iPhone' in platform.machine() or 'iPod' in platform.machine():
token_path = os.path.join(os.getenv(USER_HOME), 'Documents', '.python_hue_token')
else:
token_path = os.path.join(os.getcwd(), '.python_hue_token')
clientid = input('Client ID: ')
clientsecret = input('Client Secret: ')
appid = input('App ID: ')
saveto = input('Save token to (default: {}): '.format(token_path))
if not saveto:
saveto = token_path
RemoteToken(clientid, clientsecret, appid, saveto=saveto)
print('Saved token to {}'.format(saveto))
else:
while True:
try:
b = Bridge(args.host, config_file_path=args.config_file_path)
break
except PhueRegistrationException as e:
input('Press button on Bridge then hit Enter to try again') | [
"[email protected]"
] | |
e2d024f8f3608a4e08ea97e099b8e312e786e31b | 0860284b9a76ac1921c65ea8694dab8c9b2d6eb1 | /shop/migrations/0003_item_image.py | 6e550bf2d4f303bf92abe55dae13c4465589d7f9 | [] | no_license | alphashooter/tms-z30 | 10dd2f32ab7c9fd150f27883456f5a00f2c2b8fc | 6ce7a93e00b52432dfed22d524e2a377fceed619 | refs/heads/master | 2022-11-21T18:29:37.014735 | 2020-07-29T19:27:12 | 2020-07-29T19:27:12 | 281,730,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # Generated by Django 3.0.8 on 2020-07-17 17:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_order_name'),
]
operations = [
migrations.AddField(
model_name='item',
name='image',
field=models.ImageField(null=True, upload_to='D:\\workspace\\tms\\root\\media'),
),
]
| [
"[email protected]"
] | |
1001282a57ffd9fe8fc30ddec023555f6f51e18f | fef8f43025cff430d9aea080885173d9c22b3cb6 | /etalia/nlp/migrations/0007_userfingerprint_state.py | a6ea1a7be2e4ec6595a5e55ede3e01cd5c623ca8 | [] | no_license | GemmaAA1/etalia-open | 30a083141330e227ac1de9855894bfb6e476e3cc | 260ce54d2da53c943d8b82fa9d40bb0c0df918a6 | refs/heads/master | 2023-03-28T03:33:13.771987 | 2017-10-30T00:55:27 | 2017-10-30T00:55:27 | 351,120,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('nlp', '0006_auto_20160608_1959'),
]
operations = [
migrations.AddField(
model_name='userfingerprint',
name='state',
field=models.CharField(max_length=3, blank=True, choices=[('NON', 'Uninitialized'), ('IDL', 'Idle'), ('ING', 'Syncing')]),
),
]
| [
"[email protected]"
] | |
fba761f2efbcc2139b8f5aad36b10c495f326002 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17r_2_00/mac/access_list/__init__.py | 67b9066f535f1a6930530d79af07eb76648b8b95 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,123 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import standard
import extended
class access_list(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mac-access-list - based on the path /mac/access-list. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__standard','__extended',)
_yang_name = 'access-list'
_rest_name = 'access-list'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__extended = YANGDynClass(base=YANGListType("name",extended.extended, yang_name="extended", rest_name="extended", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Extended MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'extMacAclCP', u'cli-mode-name': u'conf-macl-ext'}}), is_container='list', yang_name="extended", rest_name="extended", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Extended MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'extMacAclCP', u'cli-mode-name': u'conf-macl-ext'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='list', is_config=True)
self.__standard = YANGDynClass(base=YANGListType("name",standard.standard, yang_name="standard", rest_name="standard", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Standard MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'macAclCP', u'cli-mode-name': u'conf-macl-std'}}), is_container='list', yang_name="standard", rest_name="standard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Standard MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'macAclCP', u'cli-mode-name': u'conf-macl-std'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mac', u'access-list']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mac', u'access-list']
def _get_standard(self):
"""
Getter method for standard, mapped from YANG variable /mac/access_list/standard (list)
"""
return self.__standard
def _set_standard(self, v, load=False):
"""
Setter method for standard, mapped from YANG variable /mac/access_list/standard (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_standard is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_standard() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",standard.standard, yang_name="standard", rest_name="standard", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Standard MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'macAclCP', u'cli-mode-name': u'conf-macl-std'}}), is_container='list', yang_name="standard", rest_name="standard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Standard MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'macAclCP', u'cli-mode-name': u'conf-macl-std'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """standard must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",standard.standard, yang_name="standard", rest_name="standard", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Standard MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'macAclCP', u'cli-mode-name': u'conf-macl-std'}}), is_container='list', yang_name="standard", rest_name="standard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Standard MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'macAclCP', u'cli-mode-name': u'conf-macl-std'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='list', is_config=True)""",
})
self.__standard = t
if hasattr(self, '_set'):
self._set()
def _unset_standard(self):
self.__standard = YANGDynClass(base=YANGListType("name",standard.standard, yang_name="standard", rest_name="standard", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Standard MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'macAclCP', u'cli-mode-name': u'conf-macl-std'}}), is_container='list', yang_name="standard", rest_name="standard", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Standard MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'macAclCP', u'cli-mode-name': u'conf-macl-std'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='list', is_config=True)
def _get_extended(self):
"""
Getter method for extended, mapped from YANG variable /mac/access_list/extended (list)
"""
return self.__extended
def _set_extended(self, v, load=False):
"""
Setter method for extended, mapped from YANG variable /mac/access_list/extended (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_extended is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_extended() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",extended.extended, yang_name="extended", rest_name="extended", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Extended MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'extMacAclCP', u'cli-mode-name': u'conf-macl-ext'}}), is_container='list', yang_name="extended", rest_name="extended", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Extended MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'extMacAclCP', u'cli-mode-name': u'conf-macl-ext'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """extended must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",extended.extended, yang_name="extended", rest_name="extended", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Extended MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'extMacAclCP', u'cli-mode-name': u'conf-macl-ext'}}), is_container='list', yang_name="extended", rest_name="extended", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Extended MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'extMacAclCP', u'cli-mode-name': u'conf-macl-ext'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='list', is_config=True)""",
})
self.__extended = t
if hasattr(self, '_set'):
self._set()
def _unset_extended(self):
self.__extended = YANGDynClass(base=YANGListType("name",extended.extended, yang_name="extended", rest_name="extended", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Extended MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'extMacAclCP', u'cli-mode-name': u'conf-macl-ext'}}), is_container='list', yang_name="extended", rest_name="extended", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Extended MAC ACL', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'extMacAclCP', u'cli-mode-name': u'conf-macl-ext'}}, namespace='urn:brocade.com:mgmt:brocade-mac-access-list', defining_module='brocade-mac-access-list', yang_type='list', is_config=True)
standard = __builtin__.property(_get_standard, _set_standard)
extended = __builtin__.property(_get_extended, _set_extended)
_pyangbind_elements = {'standard': standard, 'extended': extended, }
| [
"[email protected]"
] | |
c89d3efc23bc0b5fcc1e8a00c036bb63a7b47892 | 2f63688febd21dc3ae6b19abfa79ad313c820154 | /0654_Maximum_Binary_Tree/try_1.py | 8c0bfb903b3d382b63155f8803749062365dcbb9 | [] | no_license | novayo/LeetCode | cadd03587ee4ed6e35f60294070165afc1539ac8 | 54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7 | refs/heads/master | 2023-08-14T00:35:15.528520 | 2023-07-30T05:56:05 | 2023-07-30T05:56:05 | 200,248,146 | 8 | 1 | null | 2022-11-19T04:37:54 | 2019-08-02T14:24:19 | Python | UTF-8 | Python | false | false | 2,129 | py | class Node:
def __init__(self, val, l_index, r_index, left=None, right=None):
self.val = val
self.l_index = l_index
self.r_index = r_index
self.left = left
self.right = right
class SegmentTree:
def __init__(self, arr):
self.root = self.build(arr)
def build(self, arr):
def _build(l, r):
if l == r:
return Node(arr[l], l, r)
mid = l + (r-l) // 2
left = _build(l, mid)
right = _build(mid+1, r)
return Node(max(left.val, right.val), l, r, left, right)
return _build(0, len(arr)-1)
def query(self, l, r):
def _query(root, l, r):
if l > r:
return -float('inf')
if root.l_index == l and root.r_index == r:
return root.val
mid = root.l_index + (root.r_index-root.l_index) // 2
if mid > r:
return _query(root.left, l, r)
elif mid < l:
return _query(root.right, l, r)
else:
return max(
_query(root.left, l, mid),
_query(root.right, mid+1, r)
)
return _query(self.root, l, r)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def constructMaximumBinaryTree(self, nums: List[int]) -> Optional[TreeNode]:
index = {}
for i, num in enumerate(nums):
index[num] = i
tree = SegmentTree(nums)
def build(l, r):
if l > r:
return None
if l == r:
return TreeNode(nums[l])
_max_num = tree.query(l, r)
mid = index[_max_num]
left = build(l, mid-1)
right = build(mid+1, r)
return TreeNode(_max_num, left, right)
return build(0, len(nums)-1)
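# Usage sketch (hypothetical input; TreeNode, List and Optional are assumed to
# be supplied by the judge environment, as the stub above implies):
#
#     root = Solution().constructMaximumBinaryTree([3, 2, 1, 6, 0, 5])
#     # root.val == 6 -- the array maximum becomes the root of the tree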
| [
"[email protected]"
] | |
24e92581df17f8b0acfef9c6d17c0bd76fe68dcc | 36132d1a4a2669775fbf1f86f6c4b1f341c6a85e | /aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/ListSwimmingLaneGroupRequest.py | 2f72af50804a591551a515bfd49d9c3cbd17b700 | [
"Apache-2.0"
] | permissive | ghosthgy/aliyun-openapi-python-sdk | 0f676e47d0df51d9e0727a0ae00ed9c86fe756f8 | eb809a296864f29f8fce6e82adf29fdeedb41c0a | refs/heads/master | 2023-03-26T00:49:11.347883 | 2021-03-25T09:25:14 | 2021-03-25T09:25:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,552 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkedas.endpoint import endpoint_data
class ListSwimmingLaneGroupRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Edas', '2017-08-01', 'ListSwimmingLaneGroup','edas')
self.set_uri_pattern('/pop/v5/trafficmgnt/swimming_lane_groups')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_LogicalRegionId(self):
return self.get_query_params().get('LogicalRegionId')
def set_LogicalRegionId(self,LogicalRegionId):
self.add_query_param('LogicalRegionId',LogicalRegionId) | [
"[email protected]"
] | |
6841fc1c51d3d8f1979c9ba3c3f3f3710cdf8a50 | 1819b161df921a0a7c4da89244e1cd4f4da18be4 | /WhatsApp_FarmEasy/env/lib/python3.6/site-packages/tests/integration/api/v2010/account/usage/record/test_monthly.py | 86bba622608f61100272a0416d541f1e3a226cbb | [
"MIT"
] | permissive | sanchaymittal/FarmEasy | 889b290d376d940d9b3ae2fa0620a573b0fd62a0 | 5b931a4287d56d8ac73c170a6349bdaae71bf439 | refs/heads/master | 2023-01-07T21:45:15.532142 | 2020-07-18T14:15:08 | 2020-07-18T14:15:08 | 216,203,351 | 3 | 2 | MIT | 2023-01-04T12:35:40 | 2019-10-19T12:32:15 | JavaScript | UTF-8 | Python | false | false | 5,301 | py | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class MonthlyTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.usage \
.records \
.monthly.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Monthly.json',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Monthly?Page=0&PageSize=1",
"last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Monthly?Page=3449&PageSize=1",
"next_page_uri": null,
"num_pages": 3450,
"page": 0,
"page_size": 1,
"previous_page_uri": null,
"start": 0,
"total": 3450,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Monthly",
"usage_records": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"api_version": "2010-04-01",
"category": "sms-inbound-shortcode",
"count": "0",
"count_unit": "messages",
"description": "Short Code Inbound SMS",
"end_date": "2015-09-04",
"price": "0",
"price_unit": "usd",
"start_date": "2015-09-01",
"subresource_uris": {
"all_time": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/AllTime.json?Category=sms-inbound-shortcode",
"daily": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Daily.json?Category=sms-inbound-shortcode",
"last_month": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/LastMonth.json?Category=sms-inbound-shortcode",
"monthly": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Monthly.json?Category=sms-inbound-shortcode",
"this_month": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/ThisMonth.json?Category=sms-inbound-shortcode",
"today": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Today.json?Category=sms-inbound-shortcode",
"yearly": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Yearly.json?Category=sms-inbound-shortcode",
"yesterday": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Yesterday.json?Category=sms-inbound-shortcode"
},
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Monthly?Category=sms-inbound-shortcode&StartDate=2015-09-01&EndDate=2015-09-04",
"usage": "0",
"usage_unit": "messages"
}
]
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.usage \
.records \
.monthly.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Monthly?Page=0&PageSize=1",
"last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Monthly?Page=3449&PageSize=1",
"next_page_uri": null,
"num_pages": 3450,
"page": 0,
"page_size": 1,
"previous_page_uri": null,
"start": 0,
"total": 3450,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage/Records/Monthly",
"usage_records": []
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") \
.usage \
.records \
.monthly.list()
self.assertIsNotNone(actual)
| [
"[email protected]"
] | |
32daf9deafe8d3b0a6f9ae47d1cb9f06a97df5d7 | 9766c2e479e99cca5bf7cc834c949fc4d5286275 | /SRC/common/IO/menuparser.py | b991253676f3cd18d9d14651d625bd780cf3b437 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | UstbCmsPjy/OOF2 | 4c141e8da3c7e3c5bc9129c2cb27ed301455a155 | f8539080529d257a02b8f5cc44040637387ed9a1 | refs/heads/master | 2023-05-05T09:58:22.597997 | 2020-05-28T23:05:30 | 2020-05-28T23:05:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,260 | py | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# [email protected].
# The MenuParser class reads a file or other input source and executes
# the OOFMenu commands it contains. The parser has two Modes: ascii
# and binary. In ascii mode the input must be ascii characters. In
# binary mode the input is (surprise) binary. In neither mode is the
# input eval'd by the Python interpreter.
# The inputsource argument to the MenuParser's __init__ is an object
# that has two functions, getLine() and getBytes(nbytes), which return
# strings from the input. getLine() is only used in ascii mode and
# getBytes is only used in binary mode, so it's not strictly necessary
# for the inputsource object to provide both functions.
# The MenuParser switches between modes by creating new objects of the
# MenuParserMode class. MenuParserMode subclasses must provide
# functions getMenuItem and getArguments. getMenuItem takes an
# OOFMenu as an argument, reads some input (using inputsource) and
# returns an OOFMenuItem. It should return None if there's no menu
# item to be read. getArguments takes an OOFMenuItem argument and
# returns both a list of non-keyword arguments and a dictionary of
# keyword arguments.
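# A minimal usage sketch (the file name is hypothetical and ``menu`` stands for
# an OOFMenu built elsewhere in OOF2):
#
#     parser = MenuParser(FileInput('commands.log'), menu)
#     parser.run()    # execute every menu command found in the file
#
# StringInput can be substituted for FileInput to run commands held in a string,
# and binaryMode() switches the parser to the binary data-file format.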
from ooflib.SWIG.common import ooferror
from ooflib.common import debug
from ooflib.common import utils
import os
import stat
import sys
class MenuParser:
mode_binary = 0
mode_ascii = 1
def __init__(self, inputsource, menu, mode=mode_ascii):
self.inputsource = inputsource
self.menu = menu
if mode is MenuParser.mode_ascii:
self.asciiMode()
else:
self.binaryMode()
def getLine(self):
return self.inputsource.getLine()
def getBytes(self, n):
return self.inputsource.getBytes(n)
def asciiMode(self):
self.mode = AsciiMenuParser(self)
def binaryMode(self):
from ooflib.common.IO import binarydata
self.mode = binarydata.BinaryMenuParser(self)
def run1(self):
menuitem = self.mode.getMenuItem(self.menu)
if menuitem is None:
return 0
args, kwargs = self.mode.getArguments(menuitem)
if args:
raise ooferror.ErrDataFileError(
"All arguments to menu commands must be keyword arguments!")
menuitem.parser = self
menuitem(**kwargs)
menuitem.parser = None
return 1
def run(self):
self.menu.root().quietmode(True) # don't echo commands in debug mode
self.menu.root().haltLog()
try:
while self.run1():
pass
finally:
self.menu.root().resumeLog()
self.menu.root().quietmode(False)
#######################
class InputSource:
def getLine(self):
pass
def getBytes(self, n):
pass
class FileInput(InputSource):
def __init__(self, filename):
self.filename = filename
self.file = open(filename, 'rb')
self.bytecount = 0
self.totalbytes = os.stat(filename)[stat.ST_SIZE]
def getLine(self):
line = self.file.readline()
self.bytecount += len(line)
debug.msg("%s: %s" %(self.filename, line[:min(len(line)-1, 100)]))
return line
def getBytes(self, n):
b = self.file.read(n)
self.bytecount += len(b)
if len(b) != n:
raise ooferror.ErrDataFileError(
"Premature EOF at byte %d! (%d missing)" %
(self.bytecount, n-len(b)))
return b
class ProgFileInput(FileInput):
## FileInput with a ProgressBar
def __init__(self, filename, progress):
self.progress = progress
self._error = False
FileInput.__init__(self, filename)
def reportProgress(self):
self.progress.setFraction((1.0*self.bytecount)/self.totalbytes)
self.progress.setMessage("Read %d/%d bytes" %
(self.bytecount, self.totalbytes))
def getLine(self):
if self.progress.stopped():
self._error = True
raise ooferror.ErrDataFileError("Interrupted!")
line = FileInput.getLine(self)
self.reportProgress()
return line
def getBytes(self, n):
if self.progress.stopped():
self._error = True
raise ooferror.ErrDataFileError("Interrupted!")
b = FileInput.getBytes(self, n)
self.reportProgress()
return b
def error(self):
return self._error
class StringInput(InputSource):
def __init__(self, strng):
self.string = strng
self.position = 0
def getLine(self):
result = self.string
self.string = ""
return result
def getBytes(self, n):
end = self.position + n
if end > len(self.string):
end = len(self.string)
result = self.string[self.position:end]
self.position = end
return result
#######################
class MenuParserMode:
# The derived classes must provide the following functions:
def __init__(self, masterparser):
pass
def getMenuItem(self, menu):
        raise NotImplementedError("Somebody forgot to define %s.getMenuItem()"
                                  % self.__class__.__name__)
def getArguments(self, menuitem):
# Returns a tuple containing the name of the argument and its
# value. It doesn't return the *string* containing the value,
# because that wouldn't work for a BinaryFileMenuParser.
# Returns None if there are no more arguments.
        raise NotImplementedError("Somebody forgot to define %s.getArguments()"
                                  % self.__class__.__name__)
###########################
###########################
###########################
CMDSEP = "." # separates command and subcommand
ASSIGN = "=" # assigns argument values
ARGSEP = "," # separates arguments
BGNARG = "(" # begins arguments
ENDARG = ")" # ends arguments
SQUOTE = "'" # single quote
DQUOTE = '"' # double quote
COMMENT = "#"
ESCAPE = "\\" # continuation at EOL, quote special
BGNLIST = '['
ENDLIST = ']'
BGNTUPLE = '('
ENDTUPLE = ')'
BGNINDEX = '['
ENDINDEX = ']'
def legalname(name):
a = name[0]
if not (a.isalpha() or a == "_"):
return 0
    for x in name[1:]:
if not (x.isalnum() or x == "_"):
return 0
return 1
def string2number(strng):
try:
return int(strng)
except ValueError:
return float(strng)
class AsciiMenuParser(MenuParserMode):
# The parser does *NOT* understand backslashes correctly, but
# since it's supposed to be used to read data files, not general
# python files, that's not a big deal. Backslashes are only
# understood in the context of quoted strings, for escaping
# internal quotation marks.
# The parser is always in one of these states:
state_idle = 0 # none of the below
state_cmd = 1 # processing menu items
state_arg = 2 # looking for argument name=value
def __init__(self, masterparser):
# ascii mode stuff
self.masterparser = masterparser
self.buffer = ""
self.bufpos = 0 # position in buffer
self.buflen = 0
self.parendepth = 0
self.state = AsciiMenuParser.state_idle
self.storedTokens = []
def fetchLine(self):
self.buffer = self.masterparser.getLine()
self.bufpos = 0
self.buflen = len(self.buffer)
def nextToken(self):
# Retrieve the next unit of information ('token') from the input.
if self.storedTokens:
return self.storedTokens.pop()
return self._nextToken()
def pushbackToken(self, token):
# Restore a unit of information to the input. It will be
# retrieved on the next call to nextToken(). An arbitrary
# number of tokens can be pushed back.
self.storedTokens.append(token)
def skipSpace(self):
while self.bufpos < self.buflen and self.buffer[self.bufpos].isspace():
self.bufpos += 1
def clearBuffer(self):
self.buffer = ""
self.buflen = 0
self.bufpos = 0
def _nextToken(self):
# Do the actual work of retrieving information from the input.
# The token is removed from self.buffer and returned.
# Make sure the buffer has something in it. Get more input if needed.
while self.bufpos == self.buflen:
self.fetchLine()
if not self.buffer: # no more input
return None
self.buflen = len(self.buffer)
self.bufpos = 0
self.skipSpace() # adjusts bufpos
self.skipSpace()
if self.bufpos == self.buflen:
return self._nextToken()
# Discard comments.
if self.buffer[self.bufpos] == COMMENT:
self.clearBuffer()
return self._nextToken()
# Special characters are tokens all by themselves, unless
# they're quotation marks or group delimiters, in which case
# the whole quoted string or group is a token.
c = self.buffer[self.bufpos]
if c in specialchars[self.state]:
if c in quotechars:
return self.processQuote()
self.bufpos += 1
return c
# current char is not a special character. Token is all chars to
# next special character.
end = self.bufpos + 1
while end < self.buflen and \
not self.buffer[end] in specialchars[self.state]:
end += 1
token = self.buffer[self.bufpos:end] # don't include special char
self.bufpos = end
return token.rstrip()
def processQuote(self):
quotechar = self.buffer[self.bufpos]
quote = ""
while 1:
# look for closing quote
end = self.bufpos + 1
while end < self.buflen and self.buffer[end] != quotechar:
end += 1
if end == self.buflen: # keep looking!
quote += self.buffer[self.bufpos:end]
self.fetchLine() # look at next line
if not self.buffer:
raise ooferror.ErrDataFileError("unmatched quotation marks")
else: # found closing quote
quote += self.buffer[self.bufpos:end+1]
self.bufpos = end + 1
if quote[-2] != ESCAPE:
return quote
else:
quote = quote[:-2] + quote[-1] # remove ESCAPE
# keep looking for more input
def getMenuItem(self, menu):
ident = self.getIdentifier()
if ident is None:
return None
menuitem = getattr(menu, ident)
return self.getSubMenuItem(menuitem)
def getSubMenuItem(self, menu):
ident = self.getIdentifier()
if ident is None:
return menu
menuitem = getattr(menu, ident)
return self.getSubMenuItem(menuitem)
def getIdentifier(self):
token = self.nextToken()
if not token:
return None # EOF
if self.state is AsciiMenuParser.state_idle:
if not legalname(token):
raise ooferror.ErrDataFileError("Illegal command: '%s'" % token)
self.state = AsciiMenuParser.state_cmd
return token
if self.state is AsciiMenuParser.state_cmd:
if token[0] == CMDSEP:
self.state = AsciiMenuParser.state_idle
return self.getIdentifier()
if token[0] == BGNARG:
self.parendepth += 1
self.state = AsciiMenuParser.state_arg
return None
def getArguments(self, menuitem):
# Returns list of args and dictionary of kwargs
args = []
kwargs = {}
if self.state is not AsciiMenuParser.state_arg:
return args, kwargs
while 1:
token0 = self.nextToken()
if token0 is None:
raise ooferror.ErrDataFileError("Premature EOF in data file?")
if token0 in endSequence:
# Does no checking for matching () or [] pairs!
self.parendepth -= 1
if self.parendepth == 0:
self.state = AsciiMenuParser.state_idle
return args, kwargs
if token0 == ARGSEP:
continue
token1 = self.nextToken()
if token1 != ASSIGN: # not a keyword argument
self.pushbackToken(token1) # to be read again
args.append(self.getArgumentValue(token0))
else: # key word argument
if not legalname(token0):
raise ooferror.ErrDataFileError(
"Illegal argument name: '%s'" % token0)
token2 = self.nextToken()
kwargs[token0] = self.getArgumentValue(token2)
def getArgumentValue(self, token):
if token[0] in quotechars: # it's a string
return token[1:-1] # strip the quotation marks
if token == BGNLIST:
self.parendepth += 1
return list(self.getArguments(None)[0])
if token == BGNTUPLE:
self.parendepth += 1
return tuple(self.getArguments(None)[0])
try: # is it a number?
val = string2number(token)
except ValueError: # no, it's not
pass
else:
return val # yes, it's a number
# Is it None?
if token == 'None':
return None
if token == 'True':
return True
if token == 'False':
return False
# Is it a function or variable defined in the OOF namespace?
try:
argval = utils.OOFeval_r(token)
except KeyError:
raise ooferror.ErrDataFileError("Incomprehensible argument: %s"
% token)
# If it's a function, the next token is an open paren.
nexttoken = self.nextToken()
if nexttoken == BGNARG:
self.parendepth += 1
args, kwargs = self.getArguments(argval)
return argval(*args, **kwargs)
if nexttoken == BGNINDEX:
self.parendepth += 1
args, kwargs = self.getArguments(argval)
return argval[args[0]]
self.pushbackToken(nexttoken) # to be read again
return argval # arg was an OOF namespace variable
###################
# The parser's state affects the set of characters which have special
# meaning to it. In particular, "." is the command separator in
# command and idle modes, but is a decimal point in argument mode.
# (Perhaps there should be a special number mode.) The sets of
# special characters are stored in a dictionary keyed by the parser
# state.
specialchars = {}
specialchars[AsciiMenuParser.state_cmd] = (
CMDSEP, BGNARG, ENDARG, SQUOTE, DQUOTE, COMMENT)
# Are quotes special in state_cmd? They should never be encountered.
specialchars[AsciiMenuParser.state_idle] = \
specialchars[AsciiMenuParser.state_cmd]
specialchars[AsciiMenuParser.state_arg] = (
ASSIGN, ARGSEP, BGNARG, ENDARG, SQUOTE, DQUOTE, COMMENT,
BGNLIST, ENDLIST, BGNTUPLE, ENDTUPLE, BGNINDEX, ENDINDEX)
quotechars = (SQUOTE, DQUOTE)
endSequence = (ENDLIST, ENDTUPLE, ENDINDEX)
| [
"[email protected]"
] | |
7affcc21533613ea034e2ccd858ddcbe1173fcbd | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/golf/BuildGeometry.py | 56cbde4b8910179665112162a273d7c82d244497 | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 13,234 | py | from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from pandac.PandaModules import *
from math import *
import math
GEO_ID = 0
def circleX(angle, radius, centerX, centerY):
x = radius * cos(angle) + centerX
return x
def circleY(angle, radius, centerX, centerY):
y = radius * sin(angle) + centerY
return y
def getCirclePoints(segCount, centerX, centerY, radius, wideX= 1.0, wideY = 1.0):
returnShape = []
for seg in range(0, segCount):
coordX = wideX * (circleX(((pi * 2.0) * float(float(seg) / float(segCount))), radius, centerX, centerY))
coordY = wideY * (circleY(((pi * 2.0) * float(float(seg) / float(segCount))), radius, centerX, centerY))
returnShape.append((coordX, coordY, 1))
coordX = wideX * (circleX(((pi * 2.0) * float(0 / segCount)), radius, centerX, centerY))
coordY = wideY * (circleY(((pi * 2.0) * float(0 / segCount)), radius, centerX, centerY))
returnShape.append((coordX, coordY, 1))
return returnShape
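# For example (a sketch): getCirclePoints(4, 0.0, 0.0, 1.0) returns five
# (x, y, 1) tuples tracing a unit circle about the origin, with the first point
# repeated at the end so the outline closes; wideX and wideY simply scale the
# x and y coordinates.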
def addCircle(attachNode, vertexCount, radius, color = Vec4(1.0, 1.0, 1.0, 1.0), layer = 0):
targetGN=GeomNode("Circle Geom")
zFloat = 0.025
targetCircleShape = getCirclePoints(5 + (vertexCount), 0.0, 0.0, radius)
gFormat = GeomVertexFormat.getV3cp()
targetCircleVertexData = GeomVertexData("holds my vertices", gFormat, Geom.UHDynamic)
targetCircleVertexWriter = GeomVertexWriter(targetCircleVertexData, "vertex")
targetCircleColorWriter = GeomVertexWriter(targetCircleVertexData, "color")
targetCircleVertexWriter.addData3f(0.0, 0.0, zFloat) #center
targetCircleColorWriter.addData4f(color[0], color[1], color[2], color[3])
for vertex in targetCircleShape:
targetCircleVertexWriter.addData3f(0.0 + vertex[0] , 0.0 + vertex[1] , zFloat)
targetCircleColorWriter.addData4f(color[0], color[1], color[2], color[3])
    targetTris=GeomTrifans(Geom.UHStatic) # triangle fan object
sizeTarget = len(targetCircleShape)
targetTris.addVertex(0)
for countVertex in range(1, sizeTarget + 1):
targetTris.addVertex(countVertex)
targetTris.addVertex(1)
targetTris.closePrimitive()
targetGeom=Geom(targetCircleVertexData)
targetGeom.addPrimitive(targetTris)
attachNode.addGeom(targetGeom)
return targetGeom
def addCircleGeom(rootNode, vertexCount, radius, color = Vec4(1.0, 1.0, 1.0, 1.0), layer = 0):
global GEO_ID
GN=GeomNode("Circle %s" % (GEO_ID))
GEO_ID += 1
NodePathGeom = rootNode.attachNewNode(GN)
geo = addCircle(GN, vertexCount, radius, color, layer)
return NodePathGeom, GN, geo
def addSquare(attachNode, sizeX, sizeY, color = Vec4(1.0, 1.0, 1.0, 1.0), layer = 0):
targetGN=GeomNode("Square Geom")
sX = sizeX / 2.0
sY = sizeY / 2.0
color1 = color
color2 = color
color3 = color
gFormat = GeomVertexFormat.getV3n3cpt2()
boxVertexData = GeomVertexData("vertices", gFormat, Geom.UHDynamic)
boxVertexWriter = GeomVertexWriter(boxVertexData, "vertex")
boxNormalWriter = GeomVertexWriter(boxVertexData, "normal")
boxColorWriter = GeomVertexWriter(boxVertexData, "color")
boxTextureWriter = GeomVertexWriter(boxVertexData, "texcoord")
boxVertexWriter.addData3f(-sX, sY, 0.0)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxTextureWriter.addData2f(0.0, 1.0)
boxVertexWriter.addData3f(-sX, -sY, 0.0)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxTextureWriter.addData2f(0.0, 0.0)
boxVertexWriter.addData3f(sX, -sY, 0.0)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxTextureWriter.addData2f(1.0, 0.0)
boxVertexWriter.addData3f(sX, sY, 0.0)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxTextureWriter.addData2f(1.0, 1.0)
    boxTris=GeomTristrips(Geom.UHStatic) # triangle strip object
boxTris.addVertex(1)
boxTris.addVertex(2)
boxTris.addVertex(0)
boxTris.addVertex(3)
boxTris.closePrimitive()
boxGeom=Geom(boxVertexData)
boxGeom.addPrimitive(boxTris)
attachNode.addGeom(boxGeom)
return boxGeom
def addSquareGeom(rootNode, sizeX, sizeY, color = Vec4(1.0, 1.0, 1.0, 1.0), layer = 0):
global GEO_ID
GN=GeomNode("Square %s" % (GEO_ID))
GEO_ID += 1
NodePathGeom = rootNode.attachNewNode(GN)
geo = addSquare(GN, sizeX, sizeY, color, layer)
return NodePathGeom, GN, geo
def addBox(attachNode, sizeX, sizeY, sizeZ, color = Vec4(1.0, 1.0, 1.0, 1.0), darken = 0):
targetGN=GeomNode("Box Geom")
sX = sizeX / 2.0
sY = sizeY / 2.0
sZ = sizeZ / 2.0
color1 = color
color2 = color
color3 = color
if darken:
color1 = color * 0.75 #Vec4(0.0, 0.0, 0.0, 1.0)
color2 = color * 0.50 #Vec4(0.0, 0.0, 0.0, 1.0)
color3 = color * 0.25 #Vec4(0.0, 0.0, 0.0, 1.0)
gFormat = GeomVertexFormat.getV3n3cp()
boxVertexData = GeomVertexData("vertices", gFormat, Geom.UHDynamic)
boxVertexWriter = GeomVertexWriter(boxVertexData, "vertex")
boxNormalWriter = GeomVertexWriter(boxVertexData, "normal")
boxColorWriter = GeomVertexWriter(boxVertexData, "color")
#Front
boxVertexWriter.addData3f(sX, sY, sZ)
boxNormalWriter.addData3f(0, 1, 0)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxVertexWriter.addData3f(sX, sY, -sZ)
boxNormalWriter.addData3f(0, 1, 0)
boxColorWriter.addData4f(color1[0], color1[1], color1[2], color1[3])
boxVertexWriter.addData3f(-sX, sY, -sZ)
boxNormalWriter.addData3f(0, 1, 0)
boxColorWriter.addData4f(color1[0], color1[1], color1[2], color1[3])
boxVertexWriter.addData3f(-sX, sY, sZ)
boxNormalWriter.addData3f(0, 1, 0)
boxColorWriter.addData4f(color1[0], color1[1], color1[2], color1[3])
#Back
boxVertexWriter.addData3f(-sX, -sY, sZ)
boxNormalWriter.addData3f(0, -1, 0)
boxColorWriter.addData4f(color2[0], color2[1], color2[2], color2[3])
boxVertexWriter.addData3f(-sX, -sY, -sZ)
boxNormalWriter.addData3f(0, -1, 0)
boxColorWriter.addData4f(color3[0], color3[1], color3[2], color3[3])
boxVertexWriter.addData3f(sX, -sY, -sZ)
boxNormalWriter.addData3f(0, -1, 0)
boxColorWriter.addData4f(color2[0], color2[1], color2[2], color2[3])
boxVertexWriter.addData3f(sX, -sY, sZ)
boxNormalWriter.addData3f(0, -1, 0)
boxColorWriter.addData4f(color2[0], color2[1], color2[2], color2[3])
#Top
boxVertexWriter.addData3f(-sX, sY, sZ)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxVertexWriter.addData3f(-sX, -sY, sZ)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxVertexWriter.addData3f(sX, -sY, sZ)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxVertexWriter.addData3f(sX, sY, sZ)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
#Bottom
boxVertexWriter.addData3f(sX, sY, -sZ)
boxNormalWriter.addData3f(0, 0, -1)
boxColorWriter.addData4f(color3[0], color3[1], color3[2], color3[3])
boxVertexWriter.addData3f(sX, -sY, -sZ)
boxNormalWriter.addData3f(0, 0, -1)
boxColorWriter.addData4f(color3[0], color3[1], color3[2], color3[3])
boxVertexWriter.addData3f(-sX, -sY, -sZ)
boxNormalWriter.addData3f(0, 0, -1)
boxColorWriter.addData4f(color3[0], color3[1], color3[2], color3[3])
boxVertexWriter.addData3f(-sX, sY, -sZ)
boxNormalWriter.addData3f(0, 0, -1)
boxColorWriter.addData4f(color3[0], color3[1], color3[2], color3[3])
#Right
boxVertexWriter.addData3f(sX, sY, sZ)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxVertexWriter.addData3f(sX, -sY, sZ)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color1[0], color1[1], color1[2], color1[3])
boxVertexWriter.addData3f(sX, -sY, -sZ)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color1[0], color1[1], color1[2], color1[3])
boxVertexWriter.addData3f(sX, sY, -sZ)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color1[0], color1[1], color1[2], color1[3])
#Left
boxVertexWriter.addData3f(-sX, sY, -sZ)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color2[0], color2[1], color2[2], color2[3])
boxVertexWriter.addData3f(-sX, -sY, -sZ)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color3[0], color3[1], color3[2], color3[3])
boxVertexWriter.addData3f(-sX, -sY, sZ)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color2[0], color2[1], color2[2], color2[3])
boxVertexWriter.addData3f(-sX, sY, sZ)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color2[0], color2[1], color2[2], color2[3])
    boxTris=GeomTristrips(Geom.UHStatic) # triangle strip object
boxTris.addVertex(0)#(1)
boxTris.addVertex(1)#(2)
boxTris.addVertex(3)#(0)
boxTris.addVertex(2)#(3)
boxTris.closePrimitive()
boxTris.addVertex(5)
boxTris.addVertex(6)
boxTris.addVertex(4)
boxTris.addVertex(7)
boxTris.closePrimitive()
boxTris.addVertex(9)
boxTris.addVertex(10)
boxTris.addVertex(8)
boxTris.addVertex(11)
boxTris.closePrimitive()
boxTris.addVertex(13)
boxTris.addVertex(14)
boxTris.addVertex(12)
boxTris.addVertex(15)
boxTris.closePrimitive()
boxTris.addVertex(16) #(17)
boxTris.addVertex(17) #(18)
boxTris.addVertex(19) #(16)
boxTris.addVertex(18) #(19)
boxTris.closePrimitive()
boxTris.addVertex(21)
boxTris.addVertex(22)
boxTris.addVertex(20)
boxTris.addVertex(23)
boxTris.closePrimitive()
boxGeom=Geom(boxVertexData)
boxGeom.addPrimitive(boxTris)
attachNode.addGeom(boxGeom)
return boxGeom
def addBoxGeom(rootNode, sizeX, sizeY, sizeZ, color = Vec4(1.0, 1.0, 1.0, 1.0), darken = 0):
global GEO_ID
GN=GeomNode("Box %s" % (GEO_ID))
GEO_ID += 1
nodePathGeom = rootNode.attachNewNode(GN)
geo = addBox(GN, sizeX, sizeY, sizeZ, color, darken)
return nodePathGeom, GN, geo
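# Usage sketch (requires a running Panda3D ShowBase so that a scene graph root
# such as ``render`` exists; the sizes and colors below are arbitrary):
#
#     from direct.showbase.ShowBase import ShowBase
#     base = ShowBase()
#     circlePath, circleGN, circleGeom = addCircleGeom(base.render, 12, 2.0,
#                                                      color=Vec4(1, 0, 0, 1))
#     boxPath, boxGN, boxGeom = addBoxGeom(base.render, 1.0, 1.0, 1.0,
#                                          color=Vec4(0, 1, 0, 1), darken=1)
#     base.run()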
def addArrow(attachNode, sizeX, sizeY, color = Vec4(1.0, 1.0, 1.0, 1.0), layer = 0):
targetGN=GeomNode("Arrow Geom")
sX = sizeX / 2.0
sY = sizeY / 2.0
color1 = color
color2 = color
color3 = color
gFormat = GeomVertexFormat.getV3n3cp()
boxVertexData = GeomVertexData("vertices", gFormat, Geom.UHDynamic)
boxVertexWriter = GeomVertexWriter(boxVertexData, "vertex")
boxNormalWriter = GeomVertexWriter(boxVertexData, "normal")
boxColorWriter = GeomVertexWriter(boxVertexData, "color")
boxVertexWriter.addData3f(-sX, sY, 0.0)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxVertexWriter.addData3f(-sX, -sY, 0.0)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxVertexWriter.addData3f(sX, -sY, 0.0)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxVertexWriter.addData3f(sX, sY, 0.0)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
    boxTris=GeomTristrips(Geom.UHStatic) # triangle strip object
boxTris.addVertex(1)
boxTris.addVertex(2)
boxTris.addVertex(0)
boxTris.addVertex(3)
boxTris.closePrimitive()
boxVertexWriter.addData3f(-sX * 2.0, sY, 0.0)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxVertexWriter.addData3f(sX * 2.0, sY, 0.0)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxVertexWriter.addData3f(0.0, sY * 2.0, 0.0)
boxNormalWriter.addData3f(0, 0, 1)
boxColorWriter.addData4f(color[0], color[1], color[2], color[3])
boxTris.addVertex(4)
boxTris.addVertex(5)
boxTris.addVertex(6)
boxTris.closePrimitive()
boxGeom=Geom(boxVertexData)
boxGeom.addPrimitive(boxTris)
attachNode.addGeom(boxGeom)
return boxGeom
def addArrowGeom(rootNode, sizeX, sizeY, color = Vec4(1.0, 1.0, 1.0, 1.0), layer = 0):
global GEO_ID
GN=GeomNode("Arrow %s" % (GEO_ID))
GEO_ID += 1
NodePathGeom = rootNode.attachNewNode(GN)
geo = addArrow(GN, sizeX, sizeY, color, layer)
return NodePathGeom, GN, geo | [
"[email protected]"
] | |
0b34cc86c76f4247a8f4201fd28777566b4f57b8 | 9a5dcad85a9bd9ad020ba25577301ffcaf9ed1a3 | /scripts/cnes_grace_sync.py | 6ad1fd1759774760da55549ba7c0c1dffd6811ac | [
"CC-BY-4.0",
"MIT"
] | permissive | yaramohajerani/read-GRACE-harmonics | 7c8a6a6788d162bf807321bfc07aacb00a8e32fc | aa48506a64860809249164a9bcaebf679d41f6ff | refs/heads/main | 2023-01-24T08:17:52.900192 | 2020-12-10T00:16:10 | 2020-12-11T23:00:25 | 307,516,301 | 0 | 0 | MIT | 2020-10-26T22:06:22 | 2020-10-26T22:06:22 | null | UTF-8 | Python | false | false | 14,562 | py | #!/usr/bin/env python
u"""
cnes_grace_sync.py
Written by Tyler Sutterley (12/2020)
CNES/GRGS GRACE data download program for gravity field products
http://grgs.obs-mip.fr/grace
Downloads the tar file containing the CNES/GRGS GRACE data for a given release
Iterates through files to determine if any are not in the local file system
For any file to sync: copies from the tar file into a separate gzipped text file
following the data storage structure used with other GRACE products
Creates an index file for each data product
CALLING SEQUENCE:
python cnes_grace_sync.py --release RL04 RL05
OUTPUTS:
CNES RL01: GAC/GSM
CNES RL02: GAA/GAB/GSM
CNES RL03: GAA/GAB/GSM
CNES RL04: GSM
CNES RL05: GAA/GAB/GSM
COMMAND LINE OPTIONS:
--help: list the command line options
-D X, --directory: working data directory
-R X, --release X: CNES/GRGS data releases to sync
-C, --clobber: overwrite existing data in transfer
-M X, --mode X: Local permissions mode of the directories and files synced
-l, --log: output log of files downloaded
PROGRAM DEPENDENCIES:
utilities: download and management utilities for syncing files
UPDATE HISTORY:
Updated 12/2020: use argparse to set command line parameters
use gravity toolkit utilities to download tar files
update remote link paths for new structure
Updated 05/2019: set urllib version based on python version
Updated 10/2018: added RL04 of the GRGS/CNES product
Updated 07/2018: python3 compatible method for extracting last modified date
Updated 06/2018: using python3 compatible octal, input and urllib
Updated 05/2017: using os.makedirs to recursively create directories
Updated 04/2017: Sort members from tar file when iterating. Comments update
changed from using --rl01 and --rl02 options to setting with --release
minor changes to check_connection function to parallel other programs
Updated 02/2017: do not extract tar files to temp, extract contents of files
Updated 01/2017: v3 of the RL03 dataset: solves the problems identified at
the poles in RL03-v1 and the problem in C21/S21 identified in RL03-v2
between January 2003 and December 2012. Added MODE to set permissions
Updated 10/2016: added --directory option
Updated 09/2016: v2 of the RL03 dataset (improvements for high-latitudes)
compress output files with gzip to be similar to other GRACE data
placed copy file as gzip portion within function (gzip_copy_file)
Updated 06/2016: added clobber option and date check for individual files
(will only copy over newer or overwritten files if clobber is not set)
Updated 05-06/2016: using __future__ print function
Updated 03/2016: using getopt to set parameters, whether or not to output a
log file, added new help module. Updated for latest CNES hosts
Updated 08/2015: changed sys.exit to raise RuntimeError
Updated 01/2015: added internet connectivity check
added main definition for parameters
Updated 10/2014: updated for RL03
Individual files no longer available in GRACE format
for RL01 and RL02. Using tar files to copy all files to directory
removing RL01 as option
Updated 02/2014: quick code update for if statements
Updated 10/2013: interruptions less frequent but still occur.
Adding option to create sync logs. Will output both a file sync log
and a log of the messages from the wget command
Updated filepaths with path.join to standardize for different OS
Deleted commented lftp code (wget is much better at syncing over http)
May consider switching PO.DAAC sync program to wget to test over ftp
Updated 09/2013: added subprocess wait commands to prevent interruptions
in the system call
Updated 09/2013: switched back to wget as wget was much faster
at syncing over http. lftp code is commented and notes remain above.
non-verbose flag added to output only the files retrieved and errors
Updated 07/2013: switched to use lftp instead of wget
Requires less setup as lftp is used with PO.DAAC program
Updated 05/2013: converted to python
Updated 03/2013: sync each file with wget versus downloading archive file
added functionality for RL01 and RL03 (future release)
Written 07/2012
"""
from __future__ import print_function
import sys
import os
import re
import copy
import time
import gzip
import struct
import shutil
import tarfile
import argparse
import posixpath
import gravity_toolkit.utilities
#-- PURPOSE: sync local GRACE/GRACE-FO files with CNES server
def cnes_grace_sync(DIRECTORY, DREL=[], LOG=False, CLOBBER=False, MODE=None):
#-- remote CNES/GRGS host directory
HOST = ['http://gravitegrace.get.obs-mip.fr','grgs.obs-mip.fr','data']
#-- check if directory exists and recursively create if not
os.makedirs(DIRECTORY,MODE) if not os.path.exists(DIRECTORY) else None
#-- create dictionaries for dataset and host directory
DSET = {}
DSET['RL01'] = ['GSM', 'GAC']
DSET['RL02'] = ['GSM', 'GAA', 'GAB']
DSET['RL03'] = ['GSM', 'GAA', 'GAB']
DSET['RL04'] = ['GSM']
DSET['RL05'] = ['GSM', 'GAA', 'GAB']
#-- remote path to tar files on CNES servers
REMOTE = dict(RL01={},RL02={},RL03={},RL04={},RL05={})
#-- RL01: GSM and GAC
REMOTE['RL01']['GSM'] = ['RL01','variable','archives']
REMOTE['RL01']['GAC'] = ['RL01','variable','archives']
#-- RL02: GSM, GAA and GAB
REMOTE['RL02']['GSM'] = ['RL02','variable','archives']
REMOTE['RL02']['GAA'] = ['RL02','variable','archives']
REMOTE['RL02']['GAB'] = ['RL02','variable','archives']
#-- RL03: GSM, GAA and GAB
REMOTE['RL03']['GSM'] = ['RL03-v3','archives']
REMOTE['RL03']['GAA'] = ['RL03','variable','archives']
REMOTE['RL03']['GAB'] = ['RL03','variable','archives']
#-- RL04: GSM
REMOTE['RL04']['GSM'] = ['RL04-v1','archives']
#-- RL05: GSM, GAA, GAB for GRACE/GRACE-FO
REMOTE['RL05']['GSM'] = ['RL05','archives']
REMOTE['RL05']['GAA'] = ['RL05','archives']
REMOTE['RL05']['GAB'] = ['RL05','archives']
#-- tar file names for each dataset
TAR = dict(RL01={},RL02={},RL03={},RL04={},RL05={})
#-- RL01: GSM and GAC
TAR['RL01']['GSM'] = ['GRGS.SH_models.GRACEFORMAT.RL01.tar.gz']
TAR['RL01']['GAC'] = ['GRGS.dealiasing.RL01.tar.gz']
#-- RL02: GSM, GAA and GAB
TAR['RL02']['GSM'] = ['GRGS.SH_models.GRACEFORMAT.all.tar.gz']
TAR['RL02']['GAA'] = ['GRGS.dealiasing.GRACEFORMAT.all.tar.gz']
TAR['RL02']['GAB'] = ['GRGS.dealiasing.GRACEFORMAT.all.tar.gz']
#-- RL03: GSM, GAA and GAB
TAR['RL03']['GSM'] = ['CNES-GRGS.RL03-v3.monthly.coeff.tar.gz']
TAR['RL03']['GAA'] = ['GRGS.RL03.dealiasing.monthly.tar.gz']
TAR['RL03']['GAB'] = ['GRGS.RL03.dealiasing.monthly.tar.gz']
#-- RL04: GSM
# TAR['RL04']['GSM'] = ['CNES.RL04-v1.monthly.OLD_IERS2010_MEAN_POLE_CONVENTION.tar.gz']
TAR['RL04']['GSM'] = ['CNES.RL04-v1.monthly.NEW_IERS2010_MEAN_POLE_CONVENTION.tar.gz']
#-- RL05: GSM, GAA and GAB
TAR['RL05']['GSM'] = ['CNES-GRGS.RL05.GRACE.monthly.tar.gz',
'CNES-GRGS.RL05.GRACE-FO.monthly.tar.gz']
TAR['RL05']['GAA'] = ['CNES-GRGS.RL05.monthly.dealiasing.tar.gz']
TAR['RL05']['GAB'] = ['CNES-GRGS.RL05.monthly.dealiasing.tar.gz']
#-- create log file with list of synchronized files (or print to terminal)
if LOG:
#-- output to log file
#-- format: CNES_sync_2002-04-01.log
today = time.strftime('%Y-%m-%d',time.localtime())
LOGFILE = 'CNES_sync_{0}.log'.format(today)
fid1 = open(os.path.join(DIRECTORY,LOGFILE),'w')
print('CNES Sync Log ({0})'.format(today), file=fid1)
else:
#-- standard output (terminal output)
fid1 = sys.stdout
#-- DATA RELEASES (RL01, RL02, RL03, RL04)
#-- RL01 and RL02 are no longer updated as default
for rl in DREL:
#-- datasets (GSM, GAA, GAB)
for ds in DSET[rl]:
print('CNES/{0}/{1}'.format(rl, ds), file=fid1)
#-- specific GRACE directory
local_dir = os.path.join(DIRECTORY, 'CNES', rl, ds)
#-- check if GRACE directory exists and recursively create if not
os.makedirs(local_dir,MODE) if not os.path.exists(local_dir) else None
#-- retrieve each tar file from CNES
for t in TAR[rl][ds]:
remote_tar_path = copy.copy(HOST)
remote_tar_path.extend(REMOTE[rl][ds])
remote_tar_path.append(t)
#-- local copy of CNES data tar file
local_file = os.path.join(DIRECTORY, 'CNES', rl, t)
MD5 = gravity_toolkit.utilities.get_hash(local_file)
#-- copy remote tar file to local if new or updated
gravity_toolkit.utilities.from_http(remote_tar_path,
local=local_file, hash=MD5, chunk=16384, verbose=True,
fid=fid1, mode=MODE)
#-- Create and submit request to get modification time of file
remote_file = posixpath.join(*remote_tar_path)
request = gravity_toolkit.utilities.urllib2.Request(remote_file)
response = gravity_toolkit.utilities.urllib2.urlopen(request)
#-- change modification time to remote
time_string=response.headers['last-modified']
remote_mtime=gravity_toolkit.utilities.get_unix_time(time_string,
format='%a, %d %b %Y %H:%M:%S %Z')
#-- keep remote modification time of file and local access time
os.utime(local_file, (os.stat(local_file).st_atime, remote_mtime))
#-- open file with tarfile (read)
tar = tarfile.open(name=local_file, mode='r:gz')
#-- copy files from the tar file into the data directory
member_list=[m for m in tar.getmembers() if re.search(ds,m.name)]
#-- for each member of the dataset within the tar file
for member in member_list:
#-- local gzipped version of the file
fi = os.path.basename(member.name)
local_file = os.path.join(local_dir,'{0}.gz'.format(fi))
gzip_copy_file(fid1, tar, member, local_file, CLOBBER, MODE)
#-- close the tar file
tar.close()
#-- find GRACE files and sort by date
grace_files=[fi for fi in os.listdir(local_dir) if re.search(ds,fi)]
#-- outputting GRACE filenames to index
with open(os.path.join(local_dir,'index.txt'),'w') as fid:
for fi in sorted(grace_files):
print('{0}'.format(fi), file=fid)
#-- change permissions of index file
os.chmod(os.path.join(local_dir,'index.txt'), MODE)
#-- close log file and set permissions level to MODE
if LOG:
fid1.close()
os.chmod(os.path.join(DIRECTORY,LOGFILE), MODE)
#-- PURPOSE: copy file from tar file checking if file exists locally
#-- and if the original file is newer than the local file
def gzip_copy_file(fid, tar, member, local_file, CLOBBER, MODE):
#-- if file exists in file system: check if remote file is newer
TEST = False
OVERWRITE = ' (clobber)'
#-- last modification time of file within tar file
file1_mtime = member.mtime
#-- check if output compressed file exists in local directory
if os.access(local_file, os.F_OK):
#-- check last modification time of output gzipped file
with gzip.open(local_file, 'rb') as fileID:
fileobj = fileID.fileobj
fileobj.seek(4)
            #-- extract little endian 4 byte unsigned integer
file2_mtime, = struct.unpack("<I", fileobj.read(4))
#-- if remote file is newer: overwrite the local file
if (file1_mtime > file2_mtime):
TEST = True
OVERWRITE = ' (overwrite)'
else:
TEST = True
OVERWRITE = ' (new)'
#-- if file does not exist, is to be overwritten, or CLOBBERed
if TEST or CLOBBER:
#-- Printing files copied from tar file to new compressed file
print('{0}/{1} --> '.format(tar.name,member.name), file=fid)
print('\t{0}{1}\n'.format(local_file,OVERWRITE), file=fid)
#-- extract file contents to new compressed file
f_in = tar.extractfile(member)
with gzip.GzipFile(local_file, 'wb', 9, None, file1_mtime) as f_out:
shutil.copyfileobj(f_in, f_out)
f_in.close()
#-- keep remote modification time of file and local access time
os.utime(local_file, (os.stat(local_file).st_atime, file1_mtime))
os.chmod(local_file, MODE)
#-- Main program that calls cnes_grace_sync()
def main():
#-- Read the system arguments listed after the program
parser = argparse.ArgumentParser(
description="""CNES/GRGS GRACE data download program for
gravity field products
"""
)
#-- command line parameters
#-- working data directory
parser.add_argument('--directory','-D',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
default=os.getcwd(),
help='Working data directory')
#-- GRACE/GRACE-FO data release
parser.add_argument('--release','-r',
metavar='DREL', type=str, nargs='+',
default=['RL05'], choices=['RL01','RL02','RL03','RL04','RL05'],
help='GRACE/GRACE-FO data release')
#-- Output log file in form
#-- CNES_sync_2002-04-01.log
parser.add_argument('--log','-l',
default=False, action='store_true',
help='Output log file')
parser.add_argument('--clobber','-C',
default=False, action='store_true',
help='Overwrite existing data in transfer')
#-- permissions mode of the directories and files synced (number in octal)
parser.add_argument('--mode','-M',
type=lambda x: int(x,base=8), default=0o775,
help='Permission mode of directories and files synced')
args = parser.parse_args()
#-- check internet connection before attempting to run program
HOST = 'http://gravitegrace.get.obs-mip.fr'
if gravity_toolkit.utilities.check_connection(HOST):
cnes_grace_sync(args.directory, DREL=args.release, LOG=args.log,
CLOBBER=args.clobber, MODE=args.mode)
#-- run main program
if __name__ == '__main__':
main()
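# Example invocation (a sketch; the script name and local directory are assumptions,
# the options mirror the argparse parameters defined in main() above):
#   python cnes_grace_sync.py --directory ~/Data/GRACE --release RL05 --log --clobber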
| [
"[email protected]"
] | |
be67615eecbd94519a382643f0573620f7f41288 | 5985dde8b1fd6e1f40bf51ccc9a4759cceff4f5a | /MobileNet/handler.py | 2a370fb1f9e8e6d4c98f5e5f8bd9ca067ed4dd04 | [] | no_license | johndpope/Session3-FaceRecognition | 9297811f337c24b3c5c999a8f31b17c5e4d915d6 | 66cc77a42b6e85c7e5d967fe660954ff4e097349 | refs/heads/master | 2022-12-04T09:56:53.323310 | 2020-08-16T18:03:15 | 2020-08-16T18:03:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,957 | py | try:
import unzip_requirements
except ImportError:
pass
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
import boto3
import os
import io
import json
import base64
from requests_toolbelt.multipart import decoder
print("Import End...")
S3_BUCKET = os.environ['S3_BUCKET'] if 'S3_BUCKET' in os.environ else 'evadebs1'
MODEL_PATH = os.environ['MODEL_PATH'] if 'MODEL_PATH' in os.environ else 'model_mobilenet.pt'
print('Downloading model...')
s3 = boto3.client('s3')
print('Downloaded model...')
try:
    if not os.path.isfile(MODEL_PATH):
        print('Model file not found locally, downloading from S3...')
obj = s3.get_object(Bucket=S3_BUCKET, Key=MODEL_PATH)
print('Creating ByteStream')
bytestream = io.BytesIO(obj['Body'].read())
print("Loading Model")
model = torch.jit.load(bytestream)
print("Model Loaded...")
except Exception as e:
print(repr(e))
raise(e)
print('Model is ready.')
def transform_image(image_bytes):
try:
transformations = transforms.Compose([
transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
image = Image.open(io.BytesIO(image_bytes))
return transformations(image).unsqueeze(0)
except Exception as e:
print(repr(e))
raise(e)
def get_prediction(image_bytes):
tensor = transform_image(image_bytes=image_bytes)
return model(tensor).argmax().item()
def classify_image(event, context):
try:
print('classify_image')
content_type_header = event['headers']['content-type']
print(event['body'])
body = base64.b64decode(event['body'])
print('BODY LOADED')
picture = decoder.MultipartDecoder(body, content_type_header).parts[0]
prediction = get_prediction(image_bytes=picture.content)
print(prediction)
filename = picture.headers[b'Content-Disposition'].decode().split(';')[1].split('=')[1]
if len(filename) < 4:
filename = picture.headers[b'Content-Disposition'].decode().split(';')[2].split('=')[1]
return {
'statusCode': 200,
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': True
},
'body': json.dumps({'file': filename.replace('"', ''), 'predicted': prediction})
}
except Exception as e:
print(repr(e))
return {
"statusCode": 500,
"headers": {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': True
},
'body': json.dumps({'error': repr(e)})
}
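# Local smoke test (illustrative only; 'test.jpg' is an assumption, and the model
# download above still requires valid S3 credentials and bucket access):
#   if __name__ == '__main__':
#       with open('test.jpg', 'rb') as f:
#           print(get_prediction(image_bytes=f.read()))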
| [
"[email protected]"
] | |
1554bdd605c0aae62a8a75aebbd755fc93e965bd | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2444/60763/297365.py | 38960911bc45bebc5c79af9980fd6a246755bedb | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | t = (''+input())
s = []
s.append(t[0:t.find('k')-2])
s.append(t[t.find('k'):t.find('t')-2])
s.append(t[t.find('t'):len(t)])
s1 = eval(s[0][s[0].find('['):len(s[0])])
k = int(s[1][s[1].rfind(' '):len(s[1])])
t = int(s[2][s[2].rfind(' '):len(s[2])])
isFit = False
for i in range(len(s1)):
    for j in range(i+1, min(i+k+1, len(s1))):  # include j = i+k so that |i - j| <= k
if abs(s1[i]-s1[j]) <= t:
isFit = True
break
if isFit:
print('true')
else:
print('false') | [
"[email protected]"
] | |
f4319ed7847d2e481fad2b86448abaaccce5248e | 87e70b50a582c2bed372e2858438493ee1105905 | /Description/extracter.py | 26e7fa1f838684647bdef9800d1013df8d4dc72e | [
"MIT"
] | permissive | adi-797/AIAMI | 868b96545ffa5ef9ddb214960147c312a956d119 | 37ea2bf61e85bf879a0f4a1014f2e93b87301582 | refs/heads/master | 2020-04-10T13:07:43.580065 | 2017-05-18T22:39:38 | 2017-05-18T22:39:38 | 161,041,442 | 1 | 0 | MIT | 2018-12-09T13:19:48 | 2018-12-09T13:19:48 | null | UTF-8 | Python | false | false | 7,042 | py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats
from pylab import imshow, show, figure
import essentia.standard
from utils import config
def extractAllDescriptors(signal):
"""
Extracts the descriptors expected for the analysis of a given audio file.
"""
described = {}
described['Silence'] = _silence = silence(signal)
signal = signal[config.hopSize * _silence[0]:config.hopSize * _silence[1]] / np.max(
        signal) # keep only the part with sound and normalize by the peak so every signal is at a comparable level
described['mfcc'] = mfccs(signal)
described['Inharmonicity'] = inharmonicity_tesis(signal)
described['Energy'] = energy(signal)
described['LogAttackTime'] = log_attack_time(signal)
described['Standard-Dev'] = standard_dev(signal)
described['Variance'] = variance(signal)
described['Skewness'] = skewness(signal)
described['kurtosis'] = kurtosis(signal)
# described['mfcc-1st'] = np.gradient(described['mfcc'])[1]
# described['mfcc-2nd'] = np.gradient(described['mfcc-1st'])[1]
described['Inharmonicity-1st'] = np.gradient(described['Inharmonicity'])
described['Inharmonicity-2nd'] = np.gradient(described['Inharmonicity-1st'])
described['mfcc-Std-f'], described['mfcc-Var-f'], described['mfcc-Skew-f'], described['mfcc-Kurt-f']\
= mfcc_std_frequency(described)
return described
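# Example usage (a sketch; the file name is an assumption, the sample rate comes from config):
#   loader = essentia.standard.MonoLoader(filename='single_note.wav', sampleRate=config.sampleRate)
#   features = extractAllDescriptors(loader())
#   print(features['LogAttackTime'], features['Energy'], features['mfcc'].shape)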
def mfccs(audio, window_size=config.windowSize, fft_size=config.fftSize, hop_size=config.hopSize, plot=False):
"""
Calculates the mfcc for a given audio file.
"""
window_hann = essentia.standard.Windowing(size=window_size, type='hann')
spectrum = essentia.standard.Spectrum(
size=fft_size) # FFT() would return the complex FFT, here we just want the magnitude spectrum
mfcc = essentia.standard.MFCC(numberCoefficients=12, inputSize=fft_size / 2 + 1)
pool = essentia.Pool()
for frame in essentia.standard.FrameGenerator(audio, frameSize=fft_size, hopSize=hop_size):
mfcc_bands, mfcc_coefficients = mfcc(spectrum(window_hann(frame)))
pool.add('lowlevel.mfcc', mfcc_coefficients)
pool.add('lowlevel.mfcc_bands', mfcc_bands)
if plot:
imshow(pool['lowlevel.mfcc'].T[1:, :], aspect='auto')
show() # unnecessary if you started "ipython --pylab"
figure()
imshow(pool['lowlevel.mfcc_bands'].T, aspect='auto', interpolation='nearest')
# We ignored the first MFCC coefficient to disregard the power of the signal and only plot its spectral shape
return pool['lowlevel.mfcc'].T
def inharmonicity_tesis(audio, window_size=config.windowSize, spectrum_size=config.fftSize,
hop_size=config.hopSize, sample_rate=config.sampleRate):
""" Setting up everything """
window_bh = essentia.standard.Windowing(size=window_size, type='blackmanharris92')
spectrum = essentia.standard.Spectrum(size=spectrum_size) # magnitude spectrum
peaks = essentia.standard.SpectralPeaks(magnitudeThreshold=-120, sampleRate=sample_rate)
window_hann = essentia.standard.Windowing(size=window_size, type='hann')
pitch = essentia.standard.PitchYin(frameSize=window_size, sampleRate=sample_rate)
pitch_fft = essentia.standard.PitchYinFFT(frameSize=window_size, sampleRate=sample_rate)
harmonicpeaks = essentia.standard.HarmonicPeaks()
inharmonicity = essentia.standard.Inharmonicity()
vector_inharmonicity = np.array([])
""" Actual signal processing """
for frame in essentia.standard.FrameGenerator(audio, frameSize=spectrum_size, hopSize=hop_size):
frequency, amplitude = peaks(20 * np.log10(spectrum(window_bh(frame))))
if 0 in frequency:
            frequency = np.array([x for x in frequency if x != 0]) # drop the spectral peak at 0 Hz (the DC energy)
amplitude = amplitude[1:len(
                amplitude)] # assume the 0 Hz entry is all in the first position; if not, this will raise an error
if len(frequency) == 0:
continue
value_pitch, confidence = pitch(window_hann(frame))
value_pitch_fft, confidence_fft = pitch_fft(spectrum(window_hann(frame)))
if (confidence and confidence_fft) < 0.2:
continue
else:
if confidence > confidence_fft:
value_pitch = value_pitch
else:
value_pitch = value_pitch_fft
harmonic_frequencies, harmonic_magnitudes = harmonicpeaks(frequency, amplitude, value_pitch)
vector_inharmonicity = np.append(vector_inharmonicity,
inharmonicity(harmonic_frequencies, harmonic_magnitudes))
return vector_inharmonicity
def energy(audio):
return sum(audio * audio)
def log_attack_time(audio):
enveloped = essentia.standard.Envelope(attackTime=config.attackTime, releaseTime=config.releaseTime)(audio)
return essentia.standard.LogAttackTime(startAttackThreshold=config.startAttackThreshold)(enveloped)
def standard_dev(audio):
return np.std(audio)
def variance(audio):
return np.var(audio)
def skewness(audio):
return scipy.stats.skew(audio)
def kurtosis(audio):
return scipy.stats.kurtosis(audio)
def silence(audio, fft_size=config.fftSize, hop_size=config.hopSize):
"""
    Detects the beginning and the end of the audio file.
    The output is a vector where the first 1 indicates the beginning of the audio file and the last 1 the ending.
The threshold is set at 90dB under the maximum of the file.
The first 1 is set one frame before the real start of the sound.
"""
threshold = 90.0
real_threshold = 10.0 ** ((20.0 * np.log10(max(audio)) - threshold) / 20.0)
l = []
for frame in essentia.standard.FrameGenerator(audio, frameSize=fft_size, hopSize=hop_size):
if sum(frame * frame) >= real_threshold:
l.append(1)
else:
l.append(0)
start = l.index(1)
if start != 0:
start -= 1
end = len(l) - l[::-1].index(1)
if end != len(l):
end += 1
return [start, end]
def mfcc_std_frequency(described):
std = []
var = []
kurt = []
skew = []
inter = []
    for iii in range(len(described['mfcc'][0])):  # time axis (one value per frame)
        for jjj in range(len(described['mfcc'])):  # frequency axis (MFCC coefficients)
inter.append(described['mfcc'][jjj][iii])
        std.append(np.std(inter)) # standard deviation of each frame across all frequencies
var.append(np.var(inter))
skew.append(scipy.stats.skew(inter))
kurt.append(scipy.stats.kurtosis(inter))
return std, var, skew, kurt
def extractor(signal, sample_rate=config.sampleRate):
"""
    Extracts pretty much every descriptor available in Essentia.
"""
algor = essentia.standard.Extractor(sampleRate=sample_rate, dynamicsFrameSize=4096, dynamicsHopSize=2048,
lowLevelFrameSize=2048, lowLevelHopSize=1024, namespace="Tesis", rhythm=False)
output = algor(signal)
return output
| [
"[email protected]"
] | |
41764b41e79692b1c50cae5f31e2951e9799da89 | aef40813a1b92cec0ea4fc25ec1d4a273f9bfad4 | /Q05__/29_Minesweeper/test.py | e58826b09c60caa3dfe49e81f51a85d1227372c6 | [
"Apache-2.0"
] | permissive | hsclinical/leetcode | e9d0e522e249a24b28ab00ddf8d514ec855110d7 | 48a57f6a5d5745199c5685cd2c8f5c4fa293e54a | refs/heads/main | 2023-06-14T11:28:59.458901 | 2021-07-09T18:57:44 | 2021-07-09T18:57:44 | 319,078,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | #!/usr/bin/python
from Solution import Solution
obj = Solution()
A = [["E","E","E","E","E"],["E","E","M","E","E"],["E","E","E","E","E"],["E","E","E","E","E"]]
B = [3,0]
out = obj.updateBoard(A, B)
print(out)
| [
"[email protected]"
] | |
4c7e8d890fdb26a9cb63bf0c0f019cabf548f029 | ae33eba00ff1e0742d74948a38a779cabb825a84 | /tests/spdx/test_main.py | 00e0910044d704a32244bf58fc1231526e67f6f7 | [
"LicenseRef-scancode-proprietary-license",
"LGPL-3.0-or-later",
"LicenseRef-scancode-free-unknown",
"LGPL-3.0-only",
"MIT"
] | permissive | renovate-tests/poetry | a687672448c4baa1027b2b36bf46c1f9af5107e7 | 7c9569cd13c151c7682a7bfda0f1ec9e2ff07100 | refs/heads/master | 2020-12-15T14:13:40.895896 | 2020-06-30T13:34:46 | 2020-06-30T13:34:46 | 233,995,693 | 0 | 0 | MIT | 2020-01-15T11:24:06 | 2020-01-15T04:07:17 | Python | UTF-8 | Python | false | false | 1,111 | py | import pytest
from poetry.spdx import license_by_id
def test_license_by_id():
license = license_by_id("MIT")
assert license.id == "MIT"
assert license.name == "MIT License"
assert license.is_osi_approved
assert not license.is_deprecated
license = license_by_id("LGPL-3.0-or-later")
assert license.id == "LGPL-3.0-or-later"
assert license.name == "GNU Lesser General Public License v3.0 or later"
assert license.is_osi_approved
assert not license.is_deprecated
def test_license_by_id_is_case_insensitive():
license = license_by_id("mit")
assert license.id == "MIT"
license = license_by_id("miT")
assert license.id == "MIT"
def test_license_by_id_with_full_name():
license = license_by_id("GNU Lesser General Public License v3.0 or later")
assert license.id == "LGPL-3.0-or-later"
assert license.name == "GNU Lesser General Public License v3.0 or later"
assert license.is_osi_approved
assert not license.is_deprecated
def test_license_by_id_invalid():
with pytest.raises(ValueError):
license_by_id("invalid")
| [
"[email protected]"
] | |
03f4010173fe4b454467e5826a41e957adbd509e | 27ff56afaeff7cf6f38cf457896b50dee90b1397 | /test_code.py | eec8fe0efeceeec91ee94e8978ef8d89f7e7c3dd | [] | no_license | romulovitor/Dica_Python_Linkedin | 55352bdc7c76c1ce7b88d0e5e36be37bca7dd466 | 9c1e5cf26681188935e0b3a41070960fe5dfd9b8 | refs/heads/master | 2023-03-13T04:14:37.778946 | 2021-03-01T10:43:04 | 2021-03-01T10:43:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | import socket
import sys
from geolite2 import geolite2
host = ' '.join(sys.argv[1:])
ip = socket.gethostbyname(host)
reader = geolite2.reader()
def get_ip_location(ip):
location = reader.get(ip)
try:
country = location["country"]["names"]["en"]
except:
country = "Unknown"
try:
subdivision = location["subdivisions"][0]["names"]["en"]
except:
subdivision = "Unknown"
try:
city = location["city"]["names"]["en"]
except:
city = "Unknown"
return country, subdivision, city
country, sub, city = get_ip_location(ip=ip)
print(country)
print(sub)
print(city)
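# Example (hypothetical host): python test_code.py example.com
# prints the country, subdivision and city that geolite2 reports for the resolved IP.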
| [
"[email protected]"
] | |
fc06980868a73d533603e5f088b0902c2a59e497 | 0c4e5d83718644a0698c8a9faf08eab80932403d | /spacy-en_core_web_sm/test_data.py | 3bba0e85c799f67adf1912a8d8da079ddba8123e | [
"Apache-2.0"
] | permissive | danielfrg/conda-recipes | 8a00f931345fce1d8e0f5f07a78314290c4766d8 | 1d4f4ae8eba54d007659b359f0c9e0ea2c52eb0a | refs/heads/master | 2021-01-01T19:49:46.149994 | 2019-05-17T04:42:46 | 2019-05-17T04:42:46 | 98,701,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | import sys
import spacy
from spacy import util
from spacy.deprecated import resolve_model_name
def is_present(name):
data_path = util.get_data_path()
model_name = resolve_model_name(name)
model_path = data_path / model_name
return model_path.exists()
assert is_present('en') is True
# Test for english
nlp = spacy.load('en')
doc = nlp('London is a big city in the United Kingdom.')
assert doc[0].text == 'London'
assert doc[0].ent_iob > 0
assert doc[0].ent_type_ == 'GPE'
assert doc[1].text == 'is'
assert doc[1].ent_iob > 0
assert doc[1].ent_type_ == ''
| [
"[email protected]"
] | |
40527f639f34417df017ec57ac702370d48eec2b | 5af277b5819d74e61374d1d78c303ac93c831cf5 | /meta_reward_learning/semantic_parsing/table/wtq/evaluator.py | 9be5311c098cdf6e92c6ab57719614b77386feab | [
"Apache-2.0"
] | permissive | Ayoob7/google-research | a2d215afb31513bd59bc989e09f54667fe45704e | 727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7 | refs/heads/master | 2022-11-11T03:10:53.216693 | 2020-06-26T17:13:45 | 2020-06-26T17:13:45 | 275,205,856 | 2 | 0 | Apache-2.0 | 2020-06-26T16:58:19 | 2020-06-26T16:58:18 | null | UTF-8 | Python | false | false | 13,718 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Copy and extended from WikiTableQuestions dataset release."""
u"""Official Evaluator for WikiTableQuestions Dataset
There are 3 value types
1. String (unicode)
2. Number (float)
3. Date (a struct with 3 fields: year, month, and date)
Some fields (but not all) can be left unspecified. However, if only the year
is specified, the date is automatically converted into a number.
Target denotation = a set of items
- Each item T is a raw unicode string from Mechanical Turk
- If T can be converted to a number or date (via Stanford CoreNLP), the
converted value (number T_N or date T_D) is precomputed
Predicted denotation = a set of items
- Each item P is a string, a number, or a date
- If P is read from a text file, assume the following
- A string that can be converted into a number (float) is converted into a
number
- A string of the form "yyyy-mm-dd" is converted into a date. Unspecified
fields can be marked as "xx". For example, "xx-01-02" represents the date
January 2nd of an unknown year.
- Otherwise, it is kept as a string
The predicted denotation is correct if
1. The sizes of the target denotation and the predicted denotation are equal
2. Each item in the target denotation matches an item in the predicted
denotation
A target item T matches a predicted item P if one of the following is true:
1. normalize(raw string of T) and normalize(string form of P) are identical.
The normalize method performs the following normalizations on strings:
- Remove diacritics (é → e)
- Convert smart quotes (‘’´`“”) and dashes (‐‑‒–—−) into
ASCII ones
- Remove citations (trailing •♦†‡*#+ or [...])
- Remove details in parenthesis (trailing (...))
- Remove outermost quotation marks
- Remove trailing period (.)
- Convert to lowercase
- Collapse multiple whitespaces and strip outermost whitespaces
2. T can be interpreted as a number T_N, P is a number, and P = T_N
3. T can be interpreted as a date T_D, P is a date, and P = T_D
(exact match on all fields; e.g., xx-01-12 and 1990-01-12 do not match)
"""
__version__ = '1.0.2'
from abc import ABCMeta
from abc import abstractmethod
import argparse
import codecs
from math import isinf
from math import isnan
import os
import re
import sys
import unicodedata
from six import string_types
from tensorflow.compat.v1 import gfile
################ String Normalization ################
def normalize(x):
"""String Normalization."""
if not isinstance(x, unicode):
x = x.decode('utf8', errors='ignore')
# Remove diacritics
x = ''.join(
c for c in unicodedata.normalize('NFKD', x)
if unicodedata.category(c) != 'Mn')
# Normalize quotes and dashes
x = re.sub(ur'[‘’´`]', "'", x)
x = re.sub(ur'[“”]', '"', x)
x = re.sub(ur'[‐‑‒–—−]', '-', x)
while True:
old_x = x
# Remove citations
x = re.sub(ur'((?<!^)\[[^\]]*\]|\[\d+\]|[•♦†‡*#+])*$', '',
x.strip())
# Remove details in parenthesis
x = re.sub(ur'(?<!^)( \([^)]*\))*$', '', x.strip())
# Remove outermost quotation mark
x = re.sub(ur'^"([^"]*)"$', r'\1', x.strip())
if x == old_x:
break
# Remove final '.'
if x and x[-1] == '.':
x = x[:-1]
# Collapse whitespaces and convert to lower case
x = re.sub(ur'\s+', ' ', x, flags=re.U).lower().strip()
return x
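# Illustration (not part of the official evaluator): normalize(u'"Duran Duran" [1]')
# strips the trailing citation and the outer quotation marks and lower-cases the rest,
# returning u'duran duran'.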
################ Value Types ################
class Value(object):
__metaclass__ = ABCMeta
# Should be populated with the normalized string
_normalized = None
@abstractmethod
def match(self, other):
"""Return True if the value matches the other value.
Args: other (Value)
Returns:
Bool: a boolean
"""
pass
@property
def normalized(self):
return self._normalized
class StringValue(Value):
def __init__(self, content):
assert isinstance(content, string_types)
self._normalized = normalize(content)
self._hash = hash(self._normalized)
def __eq__(self, other):
return isinstance(other,
StringValue) and self.normalized == other.normalized
def __hash__(self):
return self._hash
def __str__(self):
return 'S' + str([self.normalized])
__repr__ = __str__
def match(self, other):
assert isinstance(other, Value)
return self.normalized == other.normalized
class NumberValue(Value):
def __init__(self, amount, original_string=None):
assert isinstance(amount, (int, long, float))
if abs(amount - round(amount)) < 1e-6:
self._amount = int(amount)
else:
self._amount = float(amount)
if not original_string:
self._normalized = unicode(self._amount)
else:
self._normalized = normalize(original_string)
self._hash = hash(self._amount)
@property
def amount(self):
return self._amount
def __eq__(self, other):
return isinstance(other, NumberValue) and self.amount == other.amount
def __hash__(self):
return self._hash
def __str__(self):
return ('N(%f)' % self.amount) + str([self.normalized])
__repr__ = __str__
def match(self, other):
assert isinstance(other, Value)
if self.normalized == other.normalized:
return True
if isinstance(other, NumberValue):
return abs(self.amount - other.amount) < 1e-6
return False
@staticmethod
def parse(text):
"""Try to parse into a number.
Return:
the number (int or float) if successful; otherwise None.
"""
try:
return int(text)
except:
try:
amount = float(text)
assert not isnan(amount) and not isinf(amount)
return amount
except:
return None
class DateValue(Value):
def __init__(self, year, month, day, original_string=None):
"""Create a new DateValue. Placeholders are marked as -1."""
assert isinstance(year, int)
assert isinstance(month, int) and (month == -1 or 1 <= month <= 12)
assert isinstance(day, int) and (day == -1 or 1 <= day <= 31)
assert not (year == month == day == -1)
self._year = year
self._month = month
self._day = day
if not original_string:
self._normalized = '{}-{}-{}'.format(year if year != -1 else 'xx',
month if month != -1 else 'xx',
                                           day if day != -1 else 'xx')
else:
self._normalized = normalize(original_string)
self._hash = hash((self._year, self._month, self._day))
@property
def ymd(self):
return (self._year, self._month, self._day)
def __eq__(self, other):
return isinstance(other, DateValue) and self.ymd == other.ymd
def __hash__(self):
return self._hash
def __str__(self):
return (('D(%d,%d,%d)' % (self._year, self._month, self._day)) + str(
[self._normalized]))
__repr__ = __str__
def match(self, other):
assert isinstance(other, Value)
if self.normalized == other.normalized:
return True
if isinstance(other, DateValue):
return self.ymd == other.ymd
return False
@staticmethod
def parse(text):
"""Try to parse into a date.
Return:
tuple (year, month, date) if successful; otherwise None.
"""
try:
ymd = text.lower().split('-')
assert len(ymd) == 3
year = -1 if ymd[0] in ('xx', 'xxxx') else int(ymd[0])
month = -1 if ymd[1] == 'xx' else int(ymd[1])
day = -1 if ymd[2] == 'xx' else int(ymd[2])
assert not (year == month == day == -1)
assert month == -1 or 1 <= month <= 12
assert day == -1 or 1 <= day <= 31
return (year, month, day)
except:
return None
################ Value Instantiation ################
def to_value(original_string, corenlp_value=None):
"""Convert the string to Value object.
Args:
original_string (string_types): Original string
corenlp_value (string_types): Optional value returned from CoreNLP
Returns:
Value
"""
if isinstance(original_string, Value):
# Already a Value
return original_string
if not corenlp_value:
corenlp_value = original_string
# Number?
amount = NumberValue.parse(corenlp_value)
if amount is not None:
return NumberValue(amount, original_string)
# Date?
ymd = DateValue.parse(corenlp_value)
if ymd is not None:
if ymd[1] == ymd[2] == -1:
return NumberValue(ymd[0], original_string)
else:
return DateValue(ymd[0], ymd[1], ymd[2], original_string)
# String.
return StringValue(original_string)
def to_value_list(original_strings, corenlp_values=None):
"""Convert a list of strings to a list of Values
Args:
original_strings: (list[string_types])
corenlp_values: (list[string_types or None])
Returns:
list[Value]
"""
assert isinstance(original_strings, (list, tuple, set))
if corenlp_values is not None:
assert isinstance(corenlp_values, (list, tuple, set))
assert len(original_strings) == len(corenlp_values)
return list(
set(to_value(x, y) for (x, y) in zip(original_strings, corenlp_values)))
else:
return list(set(to_value(x) for x in original_strings))
################ Check the Predicted Denotations ################
def check_denotation(target_values, predicted_values):
"""Return True if the predicted denotation is correct.
Args:
target_values: (list[Value])
predicted_values: (list[Value])
Returns:
bool
"""
# Check size
if len(target_values) != len(predicted_values):
return False
# Check items
for target in target_values:
if not any(target.match(pred) for pred in predicted_values):
return False
return True
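# Illustration (not part of the official evaluator), using the helpers defined above:
#   target = to_value_list([u'2,000', u'May 2010'], [u'2000.0', u'2010-05-xx'])
#   predicted = to_value_list([u'2000', u'2010-05-xx'])
#   check_denotation(target, predicted)  # -> True
# because 2,000 and 2000 agree as numbers and both dates resolve to year 2010, month 5.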
################ Batch Mode ################
def tsv_unescape(x):
"""Unescape strings in the TSV file.
Escaped characters include:
newline (0x10) -> backslash + n
vertical bar (0x7C) -> backslash + p
backslash (0x5C) -> backslash + backslash
Args: x (str or unicode)
Returns:
a unicode
"""
return x.replace(r'\n', '\n').replace(r'\p', '|').replace('\\\\', '\\')
def tsv_unescape_list(x):
"""Unescape a list in the TSV file.
List items are joined with vertical bars (0x5C)
Args: x (str or unicode)
Returns:
a list of unicodes
"""
return [tsv_unescape(y) for y in x.split('|')]
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-t',
'--tagged-dataset-path',
default=os.path.join('.', 'tagged', 'data'),
help='Directory containing CoreNLP-tagged dataset TSV file')
parser.add_argument(
'prediction_path',
help='Path to the prediction file. Each line contains '
'ex_id <tab> item1 <tab> item2 <tab> ...')
args = parser.parse_args()
# ID string --> list[Value]
target_values_map = {}
for filename in os.listdir(args.tagged_dataset_path):
filename = os.path.join(args.tagged_dataset_path, filename)
print >> sys.stderr, 'Reading dataset from', filename
with codecs.getreader('utf-8')(gfile.GFile(filename, 'r')) as fin:
header = fin.readline().rstrip('\n').split('\t')
for line in fin:
stuff = dict(zip(header, line.rstrip('\n').split('\t')))
ex_id = stuff['id']
original_strings = tsv_unescape_list(stuff['targetValue'])
canon_strings = tsv_unescape_list(stuff['targetCanon'])
target_values_map[ex_id] = to_value_list(original_strings,
canon_strings)
print >> sys.stderr, 'Read', len(target_values_map), 'examples'
print >> sys.stderr, 'Reading predictions from', args.prediction_path
num_examples, num_correct = 0, 0
with codecs.getreader('utf-8')(gfile.GFile(args.prediction_path, 'r')) as fin:
for line in fin:
line = line.rstrip('\n').split('\t')
ex_id = line[0]
if ex_id not in target_values_map:
print 'WARNING: Example ID "%s" not found' % ex_id
else:
target_values = target_values_map[ex_id]
predicted_values = to_value_list(line[1:])
correct = check_denotation(target_values, predicted_values)
print u'%s\t%s\t%s\t%s' % (ex_id, correct, target_values,
predicted_values)
num_examples += 1
if correct:
num_correct += 1
print >> sys.stderr, 'Examples:', num_examples
print >> sys.stderr, 'Correct:', num_correct
print >> sys.stderr, 'Accuracy:', round(
(num_correct + 1e-9) / (num_examples + 1e-9), 4)
# Added utility functions for computing preprocessing answers and computing
# rewards.
def target_values_map(target_value, target_cannon):
original_strings = tsv_unescape_list(target_value)
canon_strings = tsv_unescape_list(target_cannon)
target_values = to_value_list(original_strings, canon_strings)
return target_values
def check_prediction(ts_prediction_string, target_values):
predicted_values = to_value_list(ts_prediction_string)
correct = check_denotation(target_values, predicted_values)
return correct
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
cb4ebb211a087c41d1a00d5f5749e3a1bf9ef481 | 27c9b374a75550252ddfe5da400fad891c6de590 | /chars/link_scripts/Camera.py | ca10858645dead4a643555b156084f88dd92eff2 | [] | no_license | Dynamique-Zak/Zelda_BlenderGame | 03065416939deb3ce18007909ccc278c736baad0 | 0f5d5d15bfa79e9f8ea15f0ebcb76bce92f77a21 | refs/heads/master | 2016-08-13T00:12:34.746520 | 2016-02-19T23:18:27 | 2016-02-19T23:18:27 | 49,572,402 | 30 | 16 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | from bge import logic
scene = logic.getCurrentScene()
def obstacle(cont):
ray = cont.sensors["RayForward"]
if ray.positive:
hitObj = ray.hitObject
if hitObj.name != "Link":
cam = scene.objects['MainCam']
logic.camObstaclePosition = ray.hitPosition
cam.worldPosition = logic.camObstaclePosition
else:
logic.camObstaclePosition = None
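# Wiring assumption (Blender Game Engine): a Python module controller on the camera rig
# calls Camera.obstacle every frame, with a ray sensor named "RayForward" attached and a
# scene containing objects named "MainCam" and "Link" as used above.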
| [
"[email protected]"
] | |
dd6f62558664e5e4b170e264cc8c392da2512c42 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/peering/v20210601/get_connection_monitor_test.py | 3a6694d9417c7518103e56ca0869515a10726815 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,852 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetConnectionMonitorTestResult',
'AwaitableGetConnectionMonitorTestResult',
'get_connection_monitor_test',
'get_connection_monitor_test_output',
]
@pulumi.output_type
class GetConnectionMonitorTestResult:
"""
The Connection Monitor Test class.
"""
def __init__(__self__, destination=None, destination_port=None, id=None, is_test_successful=None, name=None, path=None, provisioning_state=None, source_agent=None, test_frequency_in_sec=None, type=None):
if destination and not isinstance(destination, str):
raise TypeError("Expected argument 'destination' to be a str")
pulumi.set(__self__, "destination", destination)
if destination_port and not isinstance(destination_port, int):
raise TypeError("Expected argument 'destination_port' to be a int")
pulumi.set(__self__, "destination_port", destination_port)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_test_successful and not isinstance(is_test_successful, bool):
raise TypeError("Expected argument 'is_test_successful' to be a bool")
pulumi.set(__self__, "is_test_successful", is_test_successful)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if path and not isinstance(path, list):
raise TypeError("Expected argument 'path' to be a list")
pulumi.set(__self__, "path", path)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if source_agent and not isinstance(source_agent, str):
raise TypeError("Expected argument 'source_agent' to be a str")
pulumi.set(__self__, "source_agent", source_agent)
if test_frequency_in_sec and not isinstance(test_frequency_in_sec, int):
raise TypeError("Expected argument 'test_frequency_in_sec' to be a int")
pulumi.set(__self__, "test_frequency_in_sec", test_frequency_in_sec)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def destination(self) -> Optional[str]:
"""
The Connection Monitor test destination
"""
return pulumi.get(self, "destination")
@property
@pulumi.getter(name="destinationPort")
def destination_port(self) -> Optional[int]:
"""
The Connection Monitor test destination port
"""
return pulumi.get(self, "destination_port")
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isTestSuccessful")
def is_test_successful(self) -> bool:
"""
The flag that indicates if the Connection Monitor test is successful or not.
"""
return pulumi.get(self, "is_test_successful")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def path(self) -> Sequence[str]:
"""
The path representing the Connection Monitor test.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="sourceAgent")
def source_agent(self) -> Optional[str]:
"""
The Connection Monitor test source agent
"""
return pulumi.get(self, "source_agent")
@property
@pulumi.getter(name="testFrequencyInSec")
def test_frequency_in_sec(self) -> Optional[int]:
"""
The Connection Monitor test frequency in seconds
"""
return pulumi.get(self, "test_frequency_in_sec")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetConnectionMonitorTestResult(GetConnectionMonitorTestResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConnectionMonitorTestResult(
destination=self.destination,
destination_port=self.destination_port,
id=self.id,
is_test_successful=self.is_test_successful,
name=self.name,
path=self.path,
provisioning_state=self.provisioning_state,
source_agent=self.source_agent,
test_frequency_in_sec=self.test_frequency_in_sec,
type=self.type)
def get_connection_monitor_test(connection_monitor_test_name: Optional[str] = None,
peering_service_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionMonitorTestResult:
"""
The Connection Monitor Test class.
:param str connection_monitor_test_name: The name of the connection monitor test
:param str peering_service_name: The name of the peering service.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['connectionMonitorTestName'] = connection_monitor_test_name
__args__['peeringServiceName'] = peering_service_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:peering/v20210601:getConnectionMonitorTest', __args__, opts=opts, typ=GetConnectionMonitorTestResult).value
return AwaitableGetConnectionMonitorTestResult(
destination=__ret__.destination,
destination_port=__ret__.destination_port,
id=__ret__.id,
is_test_successful=__ret__.is_test_successful,
name=__ret__.name,
path=__ret__.path,
provisioning_state=__ret__.provisioning_state,
source_agent=__ret__.source_agent,
test_frequency_in_sec=__ret__.test_frequency_in_sec,
type=__ret__.type)
@_utilities.lift_output_func(get_connection_monitor_test)
def get_connection_monitor_test_output(connection_monitor_test_name: Optional[pulumi.Input[str]] = None,
peering_service_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConnectionMonitorTestResult]:
"""
The Connection Monitor Test class.
:param str connection_monitor_test_name: The name of the connection monitor test
:param str peering_service_name: The name of the peering service.
:param str resource_group_name: The name of the resource group.
"""
...
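# Example usage (illustrative resource names only):
#
#   test = get_connection_monitor_test_output(
#       connection_monitor_test_name="cmTest1",
#       peering_service_name="peeringSvc1",
#       resource_group_name="rg1")
#   pulumi.export("isTestSuccessful", test.is_test_successful)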
| [
"[email protected]"
] | |
dc07aacdc0cad289a408330ade332418e2a6e981 | ddd466457316662a1455bae429740eb3c8411444 | /intro/3_6_cond_if_bonus_pints.py | 9deed81970a0407735ec582a44c78263e764d6b5 | [] | no_license | fingerman/python_fundamentals | 9ef46e51d6e9b8328e9c949fa0f807f30bd6e482 | 1fb604220922530d1171200a3cf3a927c028a6ed | refs/heads/master | 2023-01-09T12:02:26.712810 | 2020-01-22T16:12:32 | 2020-01-22T16:12:32 | 151,728,846 | 0 | 0 | null | 2022-12-27T15:34:12 | 2018-10-05T13:58:10 | Python | UTF-8 | Python | false | false | 197 | py | a = int(input())
b = 0
if a <= 100:
b = 5
if a > 100:
b = 0.2*a
if a > 1000:
b = 0.1*a
if a % 2 == 0:
b += 1
if a % 10 == 5:
b += 2
print(b)
print(a+b)
| [
"[email protected]"
] | |
18b6b603c569eb741010cb429b1f20452ae52845 | ee427ddfe0514ec7bc8ebe94013a52bc29385728 | /manage.py | 86865ec4b74232d772e1bdcc83aadb618dc5418a | [] | no_license | Saviodiow95/django-popup | ed6705bab6f9d2875c9bd274ffca76d399e465a7 | 12e3f83f08da93abb64d18fe2e228e525914b901 | refs/heads/master | 2021-05-25T20:37:28.998639 | 2020-04-07T21:05:38 | 2020-04-07T21:05:38 | 253,911,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'popup.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ab916be697d2f95c148df57c3d60fd791d1f76dd | ca6efbd1754b4d65ef9595c648b9d766c96abcbe | /douyin_spider/handler/video.py | 0a67086353e78e9db4d0f1aa2ac8d7eb91403367 | [] | no_license | JaleeLi/douyin_spider | cc30b7bceb62d1440b97de99406b035c0aff9c06 | aa3df5b0fddc633d4c6ae21c509514ccb664fbb2 | refs/heads/master | 2020-05-25T03:52:51.863799 | 2019-05-16T12:49:00 | 2019-05-16T12:49:00 | 187,615,023 | 1 | 0 | null | 2019-05-20T09:59:25 | 2019-05-20T09:59:25 | null | UTF-8 | Python | false | false | 442 | py | from douyin_spider.handler.media import MediaHandler
from douyin_spider.models.video import Video
class VideoHandler(MediaHandler):
"""
    Video handler: handles video items.
"""
async def handle(self, item, **kwargs):
"""
        Handle an item using the VideoHandler.
:param item:
:param kwargs:
:return:
"""
if isinstance(item, Video):
return await self.process(item, **kwargs)
| [
"[email protected]"
] | |
46fb77e37f4b85850d94f91b29ec1d63e33535c5 | 0901a62c11d1ba11df4cee28bb3fa2b32398a7d8 | /django_blog/users/views.py | 3a24e8b5fc8be5121b2b36f0ec416cbd5d68b871 | [] | no_license | mattg317/django_blog | 80df4e46b687edf83ddd67e9cbda1f62c62ec6a0 | bf60759e252f1f169a8ee26e5fba70c73ecc48fa | refs/heads/master | 2022-12-21T21:26:24.401316 | 2019-03-08T16:16:57 | 2019-03-08T16:16:57 | 166,888,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,857 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
if request.method == 'POST':
form = UserRegisterForm(request.POST)
if form.is_valid():
# save the form
form.save()
username = form.cleaned_data.get('username')
            # create a success message
messages.success(request, f'Your account has been created! You are now able to login!')
return redirect('login')
else:
form = UserRegisterForm()
return render(request, 'users/register.html', {'form': form})
# requires user to be logged in to view this page
@login_required
def profile(request):
# when submitted pass in new data
if request.method == 'POST':
# import forms from form.py
# pass in post data, with profile form getting file data with request the image
# populate current user with instance
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST,
request.FILES,
instance=request.user.profile)
# Save if both forms are valid
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f'Your account has been updated!')
return redirect('profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
# create context to pass in
context = {
'u_form': u_form,
'p_form': p_form
}
return render(request, 'users/profile.html', context)
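# URL wiring sketch (illustrative; the real routes live in the project's urls.py):
#   path('register/', user_views.register, name='register')
#   path('profile/', user_views.profile, name='profile')
# The profile template receives u_form and p_form from the context built above.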
| [
"[email protected]"
] | |
9f07ff6a092fb565e50403ae26d8059fc6a6a492 | ae81b16cf4242d329dfcb055e85fafe87262cc7f | /leetcode/NoIdea/383. 赎金信.py | 20c36c9cacc2ffd270e4a627f23bd2d4438920e1 | [] | no_license | coquelin77/PyProject | 3d2d3870b085c4b7ff41bd200fe025630969ab8e | 58e84ed8b3748c6e0f78184ab27af7bff3778cb8 | refs/heads/master | 2023-03-18T19:14:36.441967 | 2019-06-19T02:44:22 | 2019-06-19T02:44:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | '''给定一个赎金信 (ransom) 字符串和一个杂志(magazine)字符串,判断第一个字符串ransom能不能由第二个字符串magazines里面的字符构成。如果可以构成,返回 true ;否则返回 false。
Given a ransom note string (ransom) and a magazine string (magazine), determine whether the ransom note can be constructed from the characters in the magazine. Return true if it can, otherwise return false.
(Background: so that the handwriting of the ransom note cannot be traced, each required letter is searched for in the magazine and used to compose the message.)
Note:
You may assume that both strings contain only lowercase letters.
canConstruct("a", "b") -> false
canConstruct("aa", "ab") -> false
canConstruct("aa", "aab") -> true'''
# class Solution:
# def canConstruct(self, ransomNote: 'str', magazine: 'str') -> 'bool':
# pass
#
# if __name__ == '__main__':
# ransomNote = "aa"
# magazine = "aa"
# r = list(ransomNote)
# m = list(magazine)
# for i in range(0, len(m) - 1):
# for j in range(0, len(r)):
# if m[i] == r[j]:
# pass
# else:
# m += 1
# if i == len(m) or j == len(r):
# print('1')
# else:
# print('2')
class Solution:
def canConstruct(self, ransomNote, magazine):
"""
:type ransomNote: str
:type magazine: str
:rtype: bool
"""
have_done=[]
for i in range(len(ransomNote)):
if ransomNote[i] not in have_done:
if ransomNote.count(ransomNote[i])<=magazine.count(ransomNote[i]):
pass
else:
return False
have_done.append(ransomNote[i])
return True | [
"[email protected]"
] | |
816535d45c0c52df62f91b8f07462791b8636d11 | 0547d1826e99eedb959a3463520d73985a3b844e | /Data Science for Everyone Track/21-Supervised Learning with scikit-learn/01-Classification/06-The digits recognition dataset.py | fbce23254c4a24f165f8aa42379d00b6e1fd15e8 | [] | no_license | abhaysinh/Data-Camp | 18031f8fd4ee199c2eff54a408c52da7bdd7ec0f | 782c712975e14e88da4f27505adf4e5f4b457cb1 | refs/heads/master | 2022-11-27T10:44:11.743038 | 2020-07-25T16:15:03 | 2020-07-25T16:15:03 | 282,444,344 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,828 | py | '''
The digits recognition dataset
Up until now, you have been performing binary classification, since the target variable had two possible outcomes.
Hugo, however, got to perform multi-class classification in the videos, where the target variable could take on
three possible outcomes. Why does he get to have all the fun?! In the following exercises, you'll be working
with the MNIST digits recognition dataset, which has 10 classes, the digits 0 through 9! A reduced version of
the MNIST dataset is one of scikit-learn's included datasets, and that is the one we will use in this exercise.
Each sample in this scikit-learn dataset is an 8x8 image representing a handwritten digit. Each pixel is represented
by an integer in the range 0 to 16, indicating varying levels of black. Recall that scikit-learn's built-in datasets
are of type Bunch, which are dictionary-like objects. Helpfully for the MNIST dataset, scikit-learn provides an
'images' key in addition to the 'data' and 'target' keys that you have seen with the Iris data. Because it is a 2D
array of the images corresponding to each sample, this 'images' key is useful for visualizing the images, as you'll
see in this exercise (for more on plotting 2D arrays, see Chapter 2 of DataCamp's course on Data Visualization with Python).
On the other hand, the 'data' key contains the feature array - that is, the images as a flattened array of 64 pixels.
Notice that you can access the keys of these Bunch objects in two different ways: By using the . notation, as in
digits.images, or the [] notation, as in digits['images'].
For more on the MNIST data, check out this exercise in Part 1 of DataCamp's Importing Data in Python course.
There, the full version of the MNIST dataset is used, in which the images are 28x28.
It is a famous dataset in machine learning and computer vision, and frequently used as a benchmark to evaluate
the performance of a new model.
INSTRUCTIONS
100XP
1 Import datasets from sklearn and matplotlib.pyplot as plt.
2 Load the digits dataset using the .load_digits() method on datasets.
3 Print the keys and DESCR of digits.
4 Print the shape of images and data keys using the . notation.
5 Display the 1010th image using plt.imshow().
This has been done for you, so hit 'Submit Answer' to see which handwritten digit this happens to be!
'''
# Import necessary modules
from sklearn import datasets
import matplotlib.pyplot as plt
# Load the digits dataset: digits
digits = datasets.load_digits()
# Print the keys and DESCR of the dataset
print(digits.keys())
print(digits.DESCR)
# Print the shape of the images and data keys
print(digits.images.shape)
print(digits.data.shape)
# Display digit 1010
plt.imshow(digits.images[1010], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show() | [
"[email protected]"
] | |
9ee6c69083ccaf093a1dbc5105d1ea6cc399ede3 | 0e91030c47071029d978dbfb9e7a30ae6826afe5 | /web/web_po/web_po_v4/Common/basepage.py | de7e3e53144ca52b72036ecc9720f8e8233391b4 | [] | no_license | liqi629/python_lemon | 095983fadda3639b058043b399180d19f899284b | bc5e6e6c92561ba9cec2798b7735505b377e9cd6 | refs/heads/master | 2023-02-04T00:57:09.447008 | 2020-12-27T14:46:31 | 2020-12-27T14:46:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,440 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import logging
import time
import os
from web.web_po.web_po_v4.Common.dir_config import screenshot_dir
# Goal: every single step gets exception handling, log output, and a screenshot on failure
class BasePage: # page object classes inherit from BasePage
def __init__(self, driver: WebDriver):
self.driver = driver
    # wait for an element to be visible / to appear
def wait_eleVisible(self, loc, timeout=10, frequncy=0.5, doc=""):
start = time.time()
try:
WebDriverWait(self.driver, timeout, frequncy).until(EC.visibility_of_element_located(loc))
except:
            logging.exception("Timed out waiting for element {} to become visible!".format(loc)) # logging itself was covered in the API-testing lessons; here we just write a simple message
            # self.sava_png(doc="wait for element timed out") # the doc text could also keep being passed in from the caller
            self.sava_png(doc)
            raise # re-raise --- this is essential: the exception was caught and handled here, and without re-raising no error would be reported and the code would keep running.
        else:
            end = time.time()
            duration = end - start # the duration could still use tidier formatting; left unfinished
            logging.info("Element {} became visible; waited {} seconds.".format(loc, duration))
    # wait for an element to be present in the DOM
def wait_elePresence(self, loc, timeout=10, frequncy=0.5, doc=""):
start = time.time()
try:
WebDriverWait(self.driver, timeout, frequncy).until(EC.presence_of_element_located(loc))
except:
            logging.exception("Timed out waiting for element {} to be present!".format(loc))
self.sava_png(doc)
raise
else:
end = time.time()
            duration = end - start # the duration could still use tidier formatting; left unfinished
            logging.info("Element {} was present; waited {} seconds.".format(loc, duration))
    # save a screenshot
    # Notes:
    # 1. webdriver's screenshot method only captures the web page; it cannot capture a Windows desktop screenshot
    # 2. do not use special characters when naming the file
    def sava_png(self, doc=""): # doc is the screenshot name - the page? the class? the test case?
        # file name = page_action_time.png
        now = time.strftime("%Y_%m_%d-%H_%M_%S", time.localtime())
        # path = screenshot_dir # not wired up yet; try a simple relative path first
        path = "./Outputs/screenshot"
        if not os.path.exists(path): # check whether the directory exists; this could be split out into its own helper
os.makedirs(path)
filename = path + "/{}_{}.png".format(doc, now)
        # the screenshot call itself can also fail
try:
self.driver.save_screenshot(filename)
except:
            logging.exception("Failed to save the screenshot!")
        else:
            logging.info("Screenshot saved successfully as: {}".format(filename))
    # find an element
def get_element(self, loc, doc=""):
try:
ele = self.driver.find_element(*loc)
except:
            logging.exception("Failed to find element {}!".format(loc))
self.sava_png(doc)
raise
else:
            # logging.info("Found element {} successfully!".format(loc))
            logging.info("Found the {} element {} successfully!".format(doc, loc)) # doc lets the log pin down which element on which page
return ele
    # type text into an element
def input_text(self, loc, text, timeout=10, frequncy=0.5, doc=""):
        # precondition 1: the element is visible
        # precondition 2: find it
self.wait_eleVisible(loc, timeout, frequncy, doc)
ele = self.get_element(loc, doc)
try:
ele.send_keys(text)
except:
            logging.exception("Failed to send text '{}' to element {}!".format(text, loc))
self.sava_png(doc)
raise
else:
            logging.info("Sent text '{}' to element {} successfully.".format(text, loc))
    # click an element
def click(self, loc, timeout=10, frequncy=0.5, doc=""):
self.wait_eleVisible(loc, timeout, frequncy, doc)
ele = self.get_element(loc, doc)
try:
ele.click()
except:
            logging.exception("Failed to click element {}!".format(loc))
self.sava_png(doc)
raise
else:
            logging.info("Clicked element {} successfully.".format(loc))
    # get an element's text
def get_element_text(self, loc, timeout=10, frequncy=0.5, doc=""):
        # precondition 1: the element is present (not necessarily visible; ideally switch this to a presence wait - hence the separate wait-for-presence method; if nothing is hidden, either works)
        # precondition 2: find it
self.wait_eleVisible(loc, timeout, frequncy, doc)
# self.wait_elePresence(loc, timeout, frequncy, doc)
ele = self.get_element(loc, doc)
try:
text = ele.text
except:
            logging.exception("Failed to get the text of element {}!".format(loc))
self.sava_png(doc)
raise
else:
            logging.info("Got the text of element {} successfully.".format(loc))
return text
    # get an element's attribute
def get_element_attr(self):
pass
    # Not every action needs to be wrapped up front; add wrappers as they are actually needed.
    # When you move on to Robot Framework later, these wrappers already exist there and can serve as a reference.
    # select
    # iframe
    # windows
    # upload
    # A few examples follow:
    # switch to a frame
def switch_to_frame(self, locator, timeout=10):
        # wait for the frame to be available, then switch into it
try:
            WebDriverWait(self.driver, timeout).until(EC.frame_to_be_available_and_switch_to_it(locator)) # wait + switch in one step
except:
            logging.exception("Failed to switch to frame {}!".format(locator))
self.sava_png()
raise
else:
logging.info("切换到frame元素{}成功!".format(locator))
    # Switch window  #new #main #index
    def switch_to_window(self, index):
        # capture the handle list, trigger the new window, get all handles, then switch (the first two steps happen before this helper is called)
        handles = self.driver.window_handles # minimal sketch of the switching logic described above
        if index == "new": # switch to the most recently opened window
            self.driver.switch_to.window(handles[-1])
        elif index == "main": # switch to the first (original) window
            self.driver.switch_to.window(handles[0])
        else: # otherwise treat index as a position in the handle list
            self.driver.switch_to.window(handles[index])
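# Usage sketch (illustrative only): a minimal page object built on top of BasePage.
# The locator tuples and element names below are made-up placeholders, not from the original project.
class DemoLoginPage(BasePage):
    username_input = ("id", "username")
    password_input = ("id", "password")
    login_button = ("id", "loginBtn")
    def login(self, user, pwd):
        self.input_text(self.username_input, user, doc="login page - username")
        self.input_text(self.password_input, pwd, doc="login page - password")
        self.click(self.login_button, doc="login page - login button")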
| [
"[email protected]"
] | |
253f5a0085daaac0ec6ef0174ac93391d02e11f3 | 5d0ebc19b778ca0b2e02ac7c5d4a8d5bf07d9b23 | /astropy/cosmology/tests/test_units.py | 130aa03c830c0fba759cb6eddf1ee7ade2ae00a5 | [
"BSD-3-Clause"
] | permissive | adivijaykumar/astropy | 8ea621f20b9c8363b2701c825c526d650c05258c | 0fd7ae818fed3abe4c468170a507d52ef91dc7e8 | refs/heads/main | 2021-12-03T08:12:33.558975 | 2021-09-03T15:07:49 | 2021-09-03T15:07:49 | 402,863,896 | 0 | 1 | BSD-3-Clause | 2021-09-03T18:25:55 | 2021-09-03T18:25:54 | null | UTF-8 | Python | false | false | 8,121 | py | # -*- coding: utf-8 -*-
"""Testing :mod:`astropy.cosmology.units`."""
##############################################################################
# IMPORTS
import contextlib
import pytest
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import Planck13, default_cosmology
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_ASDF, HAS_SCIPY
from astropy.utils.exceptions import AstropyDeprecationWarning
##############################################################################
# TESTS
##############################################################################
def test_has_expected_units():
"""
Test that this module has the expected set of units. Some of the units are
imported from :mod:`astropy.units`, or vice versa. Here we test presence,
not usage. Units from :mod:`astropy.units` are tested in that module. Units
defined in :mod:`astropy.cosmology` will be tested subsequently.
"""
with pytest.warns(AstropyDeprecationWarning, match="`littleh`"):
assert u.astrophys.littleh is cu.littleh
def test_has_expected_equivalencies():
"""
Test that this module has the expected set of equivalencies. Many of the
equivalencies are imported from :mod:`astropy.units`, so here we test
presence, not usage. Equivalencies from :mod:`astropy.units` are tested in
that module. Equivalencies defined in :mod:`astropy.cosmology` will be
tested subsequently.
"""
with pytest.warns(AstropyDeprecationWarning, match="`with_H0`"):
assert u.equivalencies.with_H0 is cu.with_H0
def test_littleh():
H0_70 = 70 * u.km / u.s / u.Mpc
h70dist = 70 * u.Mpc / cu.littleh
assert_quantity_allclose(h70dist.to(u.Mpc, cu.with_H0(H0_70)), 100 * u.Mpc)
# make sure using the default cosmology works
cosmodist = default_cosmology.get().H0.value * u.Mpc / cu.littleh
assert_quantity_allclose(cosmodist.to(u.Mpc, cu.with_H0()), 100 * u.Mpc)
# Now try a luminosity scaling
h1lum = 0.49 * u.Lsun * cu.littleh ** -2
assert_quantity_allclose(h1lum.to(u.Lsun, cu.with_H0(H0_70)), 1 * u.Lsun)
# And the trickiest one: magnitudes. Using H0=10 here for the round numbers
H0_10 = 10 * u.km / u.s / u.Mpc
# assume the "true" magnitude M = 12.
# Then M - 5*log_10(h) = M + 5 = 17
withlittlehmag = 17 * (u.mag - u.MagUnit(cu.littleh ** 2))
assert_quantity_allclose(withlittlehmag.to(u.mag, cu.with_H0(H0_10)), 12 * u.mag)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_dimensionless_redshift():
"""Test the equivalency ``dimensionless_redshift``."""
z = 3 * cu.redshift
val = 3 * u.one
# show units not equal
assert z.unit == cu.redshift
assert z.unit != u.one
# test equivalency enabled by default
assert z == val
# also test that it works for powers
assert (3 * cu.redshift ** 3) == val
# and in composite units
assert (3 * u.km / cu.redshift ** 3) == 3 * u.km
# test it also works as an equivalency
with u.set_enabled_equivalencies([]): # turn off default equivalencies
assert z.to(u.one, equivalencies=cu.dimensionless_redshift()) == val
with pytest.raises(ValueError):
z.to(u.one)
# if this fails, something is really wrong
with u.add_enabled_equivalencies(cu.dimensionless_redshift()):
assert z == val
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_redshift_temperature():
"""Test the equivalency ``with_redshift``."""
cosmo = Planck13.clone(Tcmb0=3 * u.K)
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.redshift_temperature()
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.redshift_temperature()
assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
assert default_cosmo.Tcmb(z) != Tcmb
# 2) Specifying the cosmology
equivalency = cu.redshift_temperature(cosmo)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# Test `atzkw`
equivalency = cu.redshift_temperature(cosmo, ztol=1e-10)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
class Test_with_redshift:
@pytest.fixture
def cosmo(self):
return Planck13.clone(Tcmb0=3 * u.K)
# ===========================================
def test_cosmo_different(self, cosmo):
default_cosmo = default_cosmology.get()
assert default_cosmo != cosmo # shows changing default
def test_no_equivalency(self, cosmo):
"""Test the equivalency ``with_redshift`` without any enabled."""
z = 15 * cu.redshift
equivalency = cu.with_redshift(Tcmb=False)
assert len(equivalency) == 0
# -------------------------------------------
def test_temperature_off(self, cosmo):
"""Test the equivalency ``with_redshift``."""
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(Tcmb=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'K'"):
z.to(u.K, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, Tcmb=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'K'"):
z.to(u.K, equivalency)
def test_temperature(self, cosmo):
"""Test the equivalency ``with_redshift``."""
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
assert default_cosmo.Tcmb(z) != Tcmb
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# Test `atzkw`
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, Tcmb=True, atzkw={"ztol": 1e-10})
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# FIXME! get "dimensionless_redshift", "with_redshift" to work in this
# they are not in ``astropy.units.equivalencies``, so the following fails
@pytest.mark.skipif(not HAS_ASDF, reason="requires ASDF")
@pytest.mark.parametrize("equiv", [cu.with_H0])
def test_equivalencies_asdf(tmpdir, equiv):
from asdf.tests import helpers
tree = {"equiv": equiv()}
with (
pytest.warns(AstropyDeprecationWarning, match="`with_H0`")
if equiv.__name__ == "with_H0"
else contextlib.nullcontext()
):
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_equivalency_context_manager():
base_registry = u.get_current_unit_registry()
# check starting with only the dimensionless_redshift equivalency.
assert len(base_registry.equivalencies) == 1
assert str(base_registry.equivalencies[0][0]) == "redshift"
| [
"[email protected]"
] | |
1efcba01d6b7890c5dfab687f6d1b9fd2ee7078e | 00453a0be06ecc8760a3c36ea302bc1f047a644f | /convoy/validator.py | 71f599c7cbcf201ed971c30366577c93774384e2 | [
"MIT"
] | permissive | Nepomuceno/batch-shipyard | 40c40e01bb49550eb1090f56cdf3da91b21a1bbb | 2d67411257e0501ac4443f44e4d27e4a8262be8d | refs/heads/master | 2020-03-18T18:53:47.824023 | 2018-05-30T05:55:50 | 2018-05-30T06:04:43 | 135,121,497 | 0 | 0 | null | 2018-05-28T06:54:56 | 2018-05-28T06:54:56 | null | UTF-8 | Python | false | false | 3,388 | py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# compat imports
from __future__ import absolute_import, division, print_function
from builtins import ( # noqa
bytes, dict, int, list, object, range, str, ascii, chr, hex, input,
next, oct, open, pow, round, super, filter, map, zip)
# stdlib imports
import enum
import logging
import sys
try:
import pathlib2 as pathlib
except ImportError:
import pathlib
import warnings
# non-stdlib imports
import pykwalify.core
import pykwalify.errors
import ruamel.yaml
# local imports
import convoy.util
# create logger
logger = logging.getLogger(__name__)
# enums
class ConfigType(enum.Enum):
Credentials = 1,
Global = 2,
Pool = 3,
Jobs = 4,
RemoteFS = 5,
# global defines
_ROOT_PATH = pathlib.Path(__file__).resolve().parent.parent
_SCHEMAS = {
ConfigType.Credentials: {
'name': 'Credentials',
'schema': pathlib.Path(_ROOT_PATH, 'schemas/credentials.yaml'),
},
ConfigType.Global: {
'name': 'Global',
'schema': pathlib.Path(_ROOT_PATH, 'schemas/config.yaml'),
},
ConfigType.Pool: {
'name': 'Pool',
'schema': pathlib.Path(_ROOT_PATH, 'schemas/pool.yaml'),
},
ConfigType.Jobs: {
'name': 'Jobs',
'schema': pathlib.Path(_ROOT_PATH, 'schemas/jobs.yaml'),
},
ConfigType.RemoteFS: {
'name': 'RemoteFS',
'schema': pathlib.Path(_ROOT_PATH, 'schemas/fs.yaml'),
},
}
# configure loggers
_PYKWALIFY_LOGGER = logging.getLogger('pykwalify')
convoy.util.setup_logger(_PYKWALIFY_LOGGER)
_PYKWALIFY_LOGGER.setLevel(logging.CRITICAL)
convoy.util.setup_logger(logger)
# ignore ruamel.yaml warning
warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning)
def validate_config(config_type, config_file):
if config_file is None or not config_file.exists():
return
schema = _SCHEMAS[config_type]
validator = pykwalify.core.Core(
source_file=str(config_file),
schema_files=[str(schema['schema'])]
)
validator.strict_rule_validation = True
try:
validator.validate(raise_exception=True)
except pykwalify.errors.SchemaError as e:
logger.error('{} Configuration {}'.format(schema['name'], e.msg))
sys.exit(1)
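# Usage sketch (illustrative; the path below is a made-up example):
# validate_config(ConfigType.Pool, pathlib.Path('config', 'pool.yaml'))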
| [
"[email protected]"
] | |
ad980ce29098e193a27877629a6e43235f3d06e7 | 19768aa46de8fa5a52639826a80959e65f7b8e66 | /authapp/banned.py | 2096d5f8efc79aeb3f85391923d07dd43f713383 | [] | no_license | hongmingu/macawl | 0aff0a0d55acb11f06e979df2dee995941cdd5d0 | 49acead1290dd977263cb4086a621feed083fc40 | refs/heads/master | 2020-04-13T02:09:44.228294 | 2019-02-13T11:24:38 | 2019-02-13T11:24:38 | 162,822,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | BANNED_PASSWORD_LIST = [
'password', 'qwerty', 'superman', '123456', '1234567', '12345678', '123456789', '1234567890', '012345', '0123456',
'01234567', '012345678', '0123456789', 'macawl', '111111', 'aaaaaa'
]
BANNED_USERNAME_LIST = [
'macawl', 'robots.txt',
]
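# Usage sketch (illustrative; the helpers below are not part of the original module):
# def is_username_allowed(username):
#     return username.lower() not in BANNED_USERNAME_LIST
# def is_password_allowed(password):
#     return password.lower() not in BANNED_PASSWORD_LIST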
| [
"[email protected]"
] | |
ae9d90732494a776b87eca0fe505628f0a211f2c | 8a7ef686efdd9add693986be08d9a1b8f561236e | /core/RestAPI/utils.py | 46c1af8a9eb7902cfbec167e5ed9f51ba1307bba | [
"MIT"
] | permissive | thebao/kalliope | 3b0cffdcd746f107e4557e53407196eae1194f5e | a820e85ee56c9e3a34e1130e32ccef52cd58e9f9 | refs/heads/master | 2021-01-13T13:58:46.217018 | 2016-11-04T11:02:34 | 2016-11-04T11:02:34 | 72,927,345 | 0 | 0 | null | 2016-11-05T13:19:11 | 2016-11-05T13:19:11 | null | UTF-8 | Python | false | false | 1,045 | py | from functools import wraps
from flask import request, Response
from core.ConfigurationManager import SettingLoader
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
settings = SettingLoader.get_settings()
return username == settings.rest_api.login and password == settings.rest_api.password
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
settings = SettingLoader.get_settings()
if settings.rest_api.password_protected:
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
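# Usage sketch (illustrative; the route and handler below are made-up examples, not part of kalliope):
# @app.route('/synapses')
# @requires_auth
# def get_synapses():
#     return "only reachable with valid credentials"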
| [
"[email protected]"
] | |
d48efa115fb2ec6bccb9a0f884cb915557399918 | e8d42df817835b5fa83829ac6937887de127faa1 | /images6/exif.py | 4e1cb31848eda13e9290f99fb92271bf13c288c8 | [
"BSD-2-Clause",
"MIT"
] | permissive | eblade/images6 | 98f8dda84b1754ee1a306914dd2cf57a56d251bd | d68f79988a2b195a870efcc0bb368a7de474a2bc | refs/heads/master | 2020-04-03T22:04:45.715969 | 2019-07-29T17:25:57 | 2019-07-29T17:25:57 | 59,519,861 | 0 | 0 | null | 2016-11-02T23:23:57 | 2016-05-23T21:33:37 | Python | UTF-8 | Python | false | false | 1,989 | py | """Helper functions to convert exif data into better formats"""
orientation2angle = {
'Horizontal (normal)': (None, 0),
'Mirrored horizontal': ('H', 0),
'Rotated 180': (None, 180),
'Mirrored vertical': ('V', 0),
'Mirrored horizontal then rotated 90 CCW': ('H', -90),
'Rotated 90 CCW': (None, -90),
'Mirrored horizontal then rotated 90 CW': ('H', 90),
'Rotated 90 CW': (None, 90),
}
def exif_position(exif):
"""Reads exifread tags and extracts a float tuple (lat, lon)"""
lat = exif.get("GPS GPSLatitude")
lon = exif.get("GPS GPSLongitude")
if None in (lat, lon):
return None, None
lat = dms_to_float(lat)
lon = dms_to_float(lon)
if None in (lat, lon):
return None, None
if exif.get('GPS GPSLatitudeRef').printable == 'S':
lat *= -1
    if exif.get('GPS GPSLongitudeRef').printable == 'W': # longitude reference is E/W; western longitudes are negative
lon *= -1
return lat, lon
def dms_to_float(p):
"""Converts exifread data points to decimal GPX floats"""
try:
degree = p.values[0]
minute = p.values[1]
second = p.values[2]
return (
float(degree.num)/float(degree.den) +
float(minute.num)/float(minute.den)/60 +
float(second.num)/float(second.den)/3600
)
except AttributeError:
return None
def exif_string(exif, key):
p = exif.get(key)
if p:
return p.printable.strip()
def exif_int(exif, key):
p = exif.get(key)
if p:
return int(p.printable or 0)
def exif_ratio(exif, key):
p = exif.get(key)
try:
if p:
p = p.values[0]
return int(p.num), int(p.den)
except AttributeError:
if isinstance(p, int):
return p
def exif_orientation(exif):
orientation = exif.get("Image Orientation")
if orientation is None:
return None, None, 0
mirror, angle = orientation2angle.get(orientation.printable)
return orientation.printable, mirror, angle
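# Usage sketch (illustrative), assuming the tag dict comes from the exifread package:
# import exifread
# with open('photo.jpg', 'rb') as fh:
#     tags = exifread.process_file(fh)
# lat, lon = exif_position(tags)
# orientation, mirror, angle = exif_orientation(tags)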
| [
"[email protected]"
] | |
6ef7593eaf4523b81bda588bc7f80fcb2b2b1983 | a680b681210a070ff6ac3eab4ed3ea5a125991d6 | /setup.py | 971ef4d637bfcefe9f5b51d97ad073c92ddc4408 | [
"BSD-2-Clause"
] | permissive | moonbirdxp/PSpider | bb6da1de6a78d86ee8704b6eb8981773a1a31d8c | 4d7238b4ebafd129ecc5dd1095ce1ece313945ec | refs/heads/master | 2020-06-22T03:46:17.320420 | 2019-07-16T03:26:20 | 2019-07-16T04:44:17 | 197,624,496 | 0 | 0 | BSD-2-Clause | 2019-10-15T01:04:19 | 2019-07-18T16:48:09 | Python | UTF-8 | Python | false | false | 502 | py | # _*_ coding: utf-8 _*_
"""
install script: python3 setup.py install
"""
from setuptools import setup, find_packages
setup(
name="spider",
version="2.4.1",
author="xianhu",
keywords=["spider", "crawler", "multi-threads", "multi-processes", "proxies"],
packages=find_packages(exclude=("test.*",)),
package_data={
"": ["*.conf"], # include all *.conf files
},
install_requires=[
"pybloom_live>=3.0.0", # pybloom-live, fork from pybloom
]
)
| [
"[email protected]"
] | |
8665cf78ce4dd184cdd2109946d8f8d25e435330 | b26c41926fa3a7c2c061132d80e91a2750f2f468 | /tensorflow_probability/python/experimental/nn/variational_base.py | d6493e1392c98610b0dde840bd906e29b61b0ba5 | [
"Apache-2.0"
] | permissive | tensorflow/probability | 22e679a4a883e408f8ef237cda56e3e3dfa42b17 | 42a64ba0d9e0973b1707fcd9b8bd8d14b2d4e3e5 | refs/heads/main | 2023-09-04T02:06:08.174935 | 2023-08-31T20:30:00 | 2023-08-31T20:31:33 | 108,053,674 | 4,055 | 1,269 | Apache-2.0 | 2023-09-13T21:49:49 | 2017-10-23T23:50:54 | Jupyter Notebook | UTF-8 | Python | false | false | 7,602 | py | # Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base class for variational layers for building neural networks."""
import collections
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import random as tfp_random
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import independent as independent_lib
from tensorflow_probability.python.distributions import mvn_diag as mvn_diag_lib
from tensorflow_probability.python.distributions import normal as normal_lib
from tensorflow_probability.python.experimental.nn import layers as layers_lib
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.util.seed_stream import SeedStream
__all__ = [
'VariationalLayer',
]
# The following aliases ensure docstrings read more succinctly.
tfd = distribution_lib
def unpack_kernel_and_bias(weights):
"""Returns `kernel`, `bias` tuple."""
if isinstance(weights, collections.abc.Mapping):
kernel = weights.get('kernel', None)
bias = weights.get('bias', None)
elif len(weights) == 1:
kernel, bias = weights, None
elif len(weights) == 2:
kernel, bias = weights
else:
raise ValueError('Unable to unpack weights: {}.'.format(weights))
return kernel, bias
class VariationalLayer(layers_lib.Layer):
"""Base class for all variational layers."""
def __init__(
self,
posterior,
prior,
activation_fn=None,
posterior_value_fn=tfd.Distribution.sample,
seed=None,
dtype=tf.float32,
validate_args=False,
name=None):
"""Base class for variational layers.
Args:
posterior: ...
prior: ...
activation_fn: ...
posterior_value_fn: ...
seed: ...
dtype: ...
validate_args: ...
name: Python `str` prepeneded to ops created by this object.
Default value: `None` (i.e., `type(self).__name__`).
"""
super(VariationalLayer, self).__init__(
validate_args=validate_args, name=name)
self._posterior = posterior
self._prior = prior
self._activation_fn = activation_fn
self._posterior_value_fn = posterior_value_fn
self._posterior_value = None
self._seed = SeedStream(seed, salt=self.name)
self._dtype = dtype
tf.nest.assert_same_structure(prior.dtype, posterior.dtype,
check_types=False)
@property
def dtype(self):
return self._dtype
@property
def posterior(self):
return self._posterior
@property
def prior(self):
return self._prior
@property
def activation_fn(self):
return self._activation_fn
@property
def posterior_value_fn(self):
return self._posterior_value_fn
@property
def posterior_value(self):
return self._posterior_value
class VariationalReparameterizationKernelBiasLayer(VariationalLayer):
"""Variational reparameterization linear layer."""
def __init__(
self,
posterior,
prior,
apply_kernel_fn,
activation_fn=None,
posterior_value_fn=tfd.Distribution.sample,
unpack_weights_fn=unpack_kernel_and_bias,
seed=None,
dtype=tf.float32,
validate_args=False,
name=None):
super(VariationalReparameterizationKernelBiasLayer, self).__init__(
posterior,
prior,
activation_fn=activation_fn,
posterior_value_fn=posterior_value_fn,
seed=seed,
dtype=dtype,
validate_args=validate_args,
name=name)
self._apply_kernel_fn = apply_kernel_fn
self._unpack_weights_fn = unpack_weights_fn
@property
def unpack_weights_fn(self):
return self._unpack_weights_fn
def __call__(self, x, **kwargs):
x = tf.convert_to_tensor(x, dtype=self.dtype, name='x')
self._posterior_value = self.posterior_value_fn(
self.posterior, seed=self._seed()) # pylint: disable=not-callable
kernel, bias = self.unpack_weights_fn(self.posterior_value) # pylint: disable=not-callable
y = x
if kernel is not None:
y = self._apply_kernel_fn(y, kernel)
if bias is not None:
y = y + bias
if self.activation_fn is not None:
y = self.activation_fn(y) # pylint: disable=not-callable
return y
class VariationalFlipoutKernelBiasLayer(VariationalLayer):
"""Variational flipout linear layer."""
def __init__(
self,
posterior,
prior,
apply_kernel_fn,
activation_fn=None,
posterior_value_fn=tfd.Distribution.sample,
unpack_weights_fn=unpack_kernel_and_bias,
seed=None,
dtype=tf.float32,
validate_args=False,
name=None):
super(VariationalFlipoutKernelBiasLayer, self).__init__(
posterior,
prior,
activation_fn=activation_fn,
posterior_value_fn=posterior_value_fn,
seed=seed,
dtype=dtype,
validate_args=validate_args,
name=name)
self._apply_kernel_fn = apply_kernel_fn
self._unpack_weights_fn = unpack_weights_fn
@property
def unpack_weights_fn(self):
return self._unpack_weights_fn
def __call__(self, x, **kwargs):
x = tf.convert_to_tensor(x, dtype=self.dtype, name='x')
self._posterior_value = self.posterior_value_fn(
self.posterior, seed=self._seed()) # pylint: disable=not-callable
kernel, bias = self.unpack_weights_fn(self.posterior_value) # pylint: disable=not-callable
y = x
if kernel is not None:
kernel_dist, _ = self.unpack_weights_fn( # pylint: disable=not-callable
self.posterior.sample_distributions(value=self.posterior_value)[0])
kernel_loc, kernel_scale = get_spherical_normal_loc_scale(kernel_dist)
# batch_size = tf.shape(x)[0]
# sign_input_shape = ([batch_size] +
# [1] * self._rank +
# [self._input_channels])
y *= tfp_random.rademacher(ps.shape(y),
dtype=y.dtype,
seed=self._seed())
kernel_perturb = normal_lib.Normal(loc=0., scale=kernel_scale)
y = self._apply_kernel_fn( # E.g., tf.matmul.
y,
kernel_perturb.sample(seed=self._seed()))
y *= tfp_random.rademacher(ps.shape(y),
dtype=y.dtype,
seed=self._seed())
y += self._apply_kernel_fn(x, kernel_loc)
if bias is not None:
y = y + bias
if self.activation_fn is not None:
y = self.activation_fn(y) # pylint: disable=not-callable
return y
def get_spherical_normal_loc_scale(d):
if isinstance(d, independent_lib.Independent):
return get_spherical_normal_loc_scale(d.distribution)
if isinstance(d, (normal_lib.Normal, mvn_diag_lib.MultivariateNormalDiag)):
return d.loc, d.scale
raise TypeError('Expected kernel `posterior` to be spherical Normal; '
'saw: "{}".'.format(type(d).__name__))
| [
"[email protected]"
] | |
22e0763bb5fdccc77c69692bfef9870bcca55aca | 3481356e47dcc23d06e54388153fe6ba795014fa | /comprehensive_test/BaseStruct/BaseStruct/BaseStruct.py | b17d76fdf85e1568f29014674e40ae6663232196 | [] | no_license | Chise1/pyhk | c09a4c5a06ce93e7fe50c0cc078429f7f63fcb2f | 44bdb51e1772efad9d0116feab1c991c601aa68a | refs/heads/master | 2021-01-03T08:24:47.255171 | 2020-02-29T04:05:30 | 2020-02-29T04:05:30 | 239,998,705 | 1 | 0 | null | 2020-02-28T07:35:46 | 2020-02-12T11:40:39 | C | UTF-8 | Python | false | false | 7,927 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_BaseStruct')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_BaseStruct')
_BaseStruct = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_BaseStruct', [dirname(__file__)])
except ImportError:
import _BaseStruct
return _BaseStruct
try:
_mod = imp.load_module('_BaseStruct', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_BaseStruct = swig_import_helper()
del swig_import_helper
else:
import _BaseStruct
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
def add(a: 'int', b: 'int') -> "int":
return _BaseStruct.add(a, b)
add = _BaseStruct.add
class POINTER(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, POINTER, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, POINTER, name)
__repr__ = _swig_repr
__swig_setmethods__["x"] = _BaseStruct.POINTER_x_set
__swig_getmethods__["x"] = _BaseStruct.POINTER_x_get
if _newclass:
x = _swig_property(_BaseStruct.POINTER_x_get, _BaseStruct.POINTER_x_set)
__swig_setmethods__["y"] = _BaseStruct.POINTER_y_set
__swig_getmethods__["y"] = _BaseStruct.POINTER_y_get
if _newclass:
y = _swig_property(_BaseStruct.POINTER_y_get, _BaseStruct.POINTER_y_set)
def __init__(self):
this = _BaseStruct.new_POINTER()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _BaseStruct.delete_POINTER
__del__ = lambda self: None
POINTER_swigregister = _BaseStruct.POINTER_swigregister
POINTER_swigregister(POINTER)
class VECTOR(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, VECTOR, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, VECTOR, name)
__repr__ = _swig_repr
__swig_setmethods__["x"] = _BaseStruct.VECTOR_x_set
__swig_getmethods__["x"] = _BaseStruct.VECTOR_x_get
if _newclass:
x = _swig_property(_BaseStruct.VECTOR_x_get, _BaseStruct.VECTOR_x_set)
__swig_setmethods__["y"] = _BaseStruct.VECTOR_y_set
__swig_getmethods__["y"] = _BaseStruct.VECTOR_y_get
if _newclass:
y = _swig_property(_BaseStruct.VECTOR_y_get, _BaseStruct.VECTOR_y_set)
__swig_setmethods__["vector_length"] = _BaseStruct.VECTOR_vector_length_set
__swig_getmethods__["vector_length"] = _BaseStruct.VECTOR_vector_length_get
if _newclass:
vector_length = _swig_property(_BaseStruct.VECTOR_vector_length_get, _BaseStruct.VECTOR_vector_length_set)
def __init__(self):
this = _BaseStruct.new_VECTOR()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _BaseStruct.delete_VECTOR
__del__ = lambda self: None
VECTOR_swigregister = _BaseStruct.VECTOR_swigregister
VECTOR_swigregister(VECTOR)
class NET_DVR_TIME(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, NET_DVR_TIME, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, NET_DVR_TIME, name)
__repr__ = _swig_repr
__swig_setmethods__["dwYear"] = _BaseStruct.NET_DVR_TIME_dwYear_set
__swig_getmethods__["dwYear"] = _BaseStruct.NET_DVR_TIME_dwYear_get
if _newclass:
dwYear = _swig_property(_BaseStruct.NET_DVR_TIME_dwYear_get, _BaseStruct.NET_DVR_TIME_dwYear_set)
__swig_setmethods__["dwMonth"] = _BaseStruct.NET_DVR_TIME_dwMonth_set
__swig_getmethods__["dwMonth"] = _BaseStruct.NET_DVR_TIME_dwMonth_get
if _newclass:
dwMonth = _swig_property(_BaseStruct.NET_DVR_TIME_dwMonth_get, _BaseStruct.NET_DVR_TIME_dwMonth_set)
__swig_setmethods__["dwDay"] = _BaseStruct.NET_DVR_TIME_dwDay_set
__swig_getmethods__["dwDay"] = _BaseStruct.NET_DVR_TIME_dwDay_get
if _newclass:
dwDay = _swig_property(_BaseStruct.NET_DVR_TIME_dwDay_get, _BaseStruct.NET_DVR_TIME_dwDay_set)
__swig_setmethods__["dwHour"] = _BaseStruct.NET_DVR_TIME_dwHour_set
__swig_getmethods__["dwHour"] = _BaseStruct.NET_DVR_TIME_dwHour_get
if _newclass:
dwHour = _swig_property(_BaseStruct.NET_DVR_TIME_dwHour_get, _BaseStruct.NET_DVR_TIME_dwHour_set)
__swig_setmethods__["dwMinute"] = _BaseStruct.NET_DVR_TIME_dwMinute_set
__swig_getmethods__["dwMinute"] = _BaseStruct.NET_DVR_TIME_dwMinute_get
if _newclass:
dwMinute = _swig_property(_BaseStruct.NET_DVR_TIME_dwMinute_get, _BaseStruct.NET_DVR_TIME_dwMinute_set)
__swig_setmethods__["dwSecond"] = _BaseStruct.NET_DVR_TIME_dwSecond_set
__swig_getmethods__["dwSecond"] = _BaseStruct.NET_DVR_TIME_dwSecond_get
if _newclass:
dwSecond = _swig_property(_BaseStruct.NET_DVR_TIME_dwSecond_get, _BaseStruct.NET_DVR_TIME_dwSecond_set)
def __init__(self):
this = _BaseStruct.new_NET_DVR_TIME()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _BaseStruct.delete_NET_DVR_TIME
__del__ = lambda self: None
NET_DVR_TIME_swigregister = _BaseStruct.NET_DVR_TIME_swigregister
NET_DVR_TIME_swigregister(NET_DVR_TIME)
# This file is compatible with both classic and new-style classes.
| [
"[email protected]"
] | |
b95fc2cc946058c79099fb52b43c4dd92c36b741 | 0b86600e0288c0fefc081a0f428277a68b14882e | /tortue/tortue_2.py | c2f8f226561cdee9a094b36f6cc92224f645b31b | [] | no_license | Byliguel/python1-exo7 | 9ede37a8d2b8f384d1ebe3d612e8c25bbe47a350 | fbf6b08f4c1e94dd9f170875eee871a84849399e | refs/heads/master | 2020-09-22T10:16:34.044141 | 2019-12-01T11:52:51 | 2019-12-01T11:52:51 | 225,152,986 | 1 | 0 | null | 2019-12-01T11:51:37 | 2019-12-01T11:51:36 | null | UTF-8 | Python | false | false | 849 | py |
##############################
# Turtle
##############################
##############################
# Activity 2 - "for" loop
##############################
##############################
# Question 1
from turtle import *
# A pentagon
width(5)
color('blue')
for i in range(5):
forward(100)
left(72)
##############################
# Question 2
# Another pentagon
color('red')
longueur = 200
angle = 72
for i in range(5):
forward(longueur)
left(angle)
##############################
# Question 3
# A dodecagon (that is, 12 sides)
color("purple")
n = 12
angle = 360/n
for i in range(n):
forward(100)
left(angle)
##############################
# Question 4
# A spiral
color("green")
longueur = 10
for i in range(25):
forward(longueur)
left(40)
longueur = longueur + 10
exitonclick()
| [
"[email protected]"
] | |
2220848f9b639102050af94c3eaf9af3d8c8c619 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /oF8T7Apf7jfagC4fD_10.py | 1b90b98f9bbbebbad5d49653d4c2612e67ba78bc | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py |
def antipodes_average(lst):
lst1, lst2 = lst[:len(lst) // 2], lst[len(lst) // 2:][::-1]
return [(x + y) / 2 for x, y in zip(lst1, lst2)]
| [
"[email protected]"
] | |
15e9b01fef356ff7e0ed0f51586e912e135b0326 | d4a88e52bb2b504208fc8b278400791409c69dbb | /src/pyaC/mtools/sanityOfmtools.py | 090f52e68939621761d3f293755fe1ba6dad9178 | [
"MIT"
] | permissive | pcschneider/PyAstronomy | 23253ca57a35c2c4ed2ae01037f6512581e784bb | 42c2200e4d45832935b7a3d9b3b05aeb30c54b50 | refs/heads/master | 2020-04-05T22:53:02.187609 | 2017-03-02T12:14:56 | 2017-03-02T12:14:56 | 52,076,073 | 0 | 0 | null | 2016-02-19T09:17:26 | 2016-02-19T09:17:25 | null | UTF-8 | Python | false | false | 5,123 | py | from __future__ import print_function
import unittest
import os
class SanityOfmtools(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
if os.path.isfile("test.tmp"):
os.remove("test.tmp")
def sanity_numericalDerivative(self):
"""
mtools: Checking accuracy of numerical derivatives.
"""
# Check polynomial
from .numericalDerivatives import diffCFD
import numpy as np
# Analytical derivatives
x = np.linspace(-10,10,1000)
y = [np.poly1d((0.03, -0.31, 0.4, 0.35, 1.4))]
for i in range(4):
y.append(y[-1].deriv())
for erro in [2,4,6]:
for i in range(1,5):
indi, der = diffCFD(x, np.polyval(y[0], x), i, erro)
self.assertLess(np.max(np.abs(der/np.polyval(y[i], x[indi]) - 1.0)), 2e-2*pow(10.0,erro-2))
# Check trigonometric
y = [np.sin(x/10.0*2*np.pi+3)]
y.append(2*np.pi/10.0 * np.cos(x/10.0*2*np.pi+3))
y.append(-(2*np.pi/10.0)**2 * np.sin(x/10.0*2*np.pi+3))
y.append(-(2*np.pi/10.0)**3 * np.cos(x/10.0*2*np.pi+3))
y.append((2*np.pi/10.0)**4 * np.sin(x/10.0*2*np.pi+3))
for erro in [2,4,6]:
for i in range(1,5):
indi, der = diffCFD(x, y[0], i, erro)
self.assertLess(np.max(np.abs(der/y[i][indi] - 1.0)), 1e-3*pow(10.0,erro-2))
# Check exponential
y = np.exp(x)
for erro in [2,4,6]:
for i in range(1,5):
print(i, erro)
indi, der = diffCFD(x, y, i, erro)
self.assertLess(np.max(np.abs(der/y[indi] - 1.0)), 1e-3*pow(10.0,erro-2))
def sanity_diffCFDExample(self):
"""
mtools: diffCFD example
"""
from PyAstronomy import pyaC
import matplotlib.pylab as plt
import numpy as np
x = np.linspace(-10,10,1000)
    # Compute polynomial and its derivatives
# (quasi analytically)
y = [np.poly1d((0.03, -0.31, 0.4, 0.35, 1.4))]
for i in range(4):
y.append(y[-1].deriv())
    # Compute derivatives numerically and compare to
# analytic solution
erro = 2
for i in range(1,5):
indi, der = pyaC.diffCFD(x, np.polyval(y[0], x), i, erro)
plt.plot(x[indi], np.polyval(y[i], x[indi]), 'b.')
plt.plot(x[indi], der, 'r--')
# plt.show()
def sanity_ibtrapzExample(self):
"""
mtools: Checking example of ibtrapz
"""
from PyAstronomy.pyaC import mtools
import numpy as np
x = np.arange(-2.,2.01,0.1)
y = x**3 + 1.7
x0 = -1.375
x1 = +1.943
# Analytical value of integral
analyt = 0.25*(x1**4 - x0**4) + 1.7*(x1-x0)
print("Analytical value: ", analyt)
print("ibtrapz: ", mtools.ibtrapz(x, y, x0, x1))
def sanity_ibtrapz(self):
"""
mtools: Checking ibtrapz
"""
from PyAstronomy.pyaC import mtools
import numpy as np
x = np.arange(-2.,2.01,0.1)
y = 2. * x
x0 = -1.375
x1 = +1.943
# Analytical value of integral
analyt = x1**2 - x0**2
self.assertAlmostEqual(analyt, mtools.ibtrapz(x, y, x0, x1), delta=1e-10, msg="ibtrapz incorrect for linear function.")
self.assertAlmostEqual((-1.9)**2-(-2.0)**2, mtools.ibtrapz(x, y, -2.0, -2.0+0.1), delta=1e-10, msg="ibtrapz incorrect for linear function (-2,-1.9).")
self.assertAlmostEqual(0.0, mtools.ibtrapz(x, y, -2.0, +2.0), delta=1e-10, msg="ibtrapz incorrect for linear function (-2,+2).")
def sanity_zerocross1dExample(self):
"""
Checking sanity of zerocross1d example
"""
import numpy as np
import matplotlib.pylab as plt
from PyAstronomy import pyaC
# Generate some 'data'
x = np.arange(100.)**2
y = np.sin(x)
# Set the last data point to zero.
# It will not be counted as a zero crossing!
y[-1] = 0
# Set point to zero. This will be counted as a
# zero crossing
y[10] = 0.0
# Get coordinates and indices of zero crossings
xc, xi = pyaC.zerocross1d(x, y, getIndices=True)
# Plot the data
plt.plot(x, y, 'b.-')
# Add black points where the zero line is crossed
plt.plot(xc, np.zeros(len(xc)), 'kp')
# Add green points at data points preceding an actual
# zero crossing.
plt.plot(x[xi], y[xi], 'gp')
# plt.show()
def sanity_zerocross1d(self):
"""
Checking sanity of zerocross1d
"""
import numpy as np
from PyAstronomy import pyaC
x = np.arange(3.)
y = np.array([0, 3, 0])
xz = pyaC.zerocross1d(x, y)
self.assertEqual(len(xz), 0, msg="Found zero crossing in 0,3,0 array (problem with first/last point).")
y = np.array([-1., 1., -2.])
xz, xi = pyaC.zerocross1d(x, y, getIndices=True)
self.assertEqual(len(xz), 2, msg="Found the following zero crossings in -1,1,-2 array: " + str(xz))
    self.assertEqual(len(xz), len(xi), "Number of values and indices is not identical.")
self.assertAlmostEqual(np.max(np.abs(np.array([0.5, 1.+1./3.])-xz)), 0.0, delta=1e-8, msg="Found unexpected zero crossings: " + str(xz))
self.assertEqual(np.max(np.abs(xi - np.array([0,1]))), 0, msg="Found unexpected indices: " + str(xi))
| [
"[email protected]"
] | |
5c805124bb7ea1d536d8e28ac2d185d90ef5a508 | d17403d0e6ffb7e32798df921e287ff60c8461f8 | /GuoMei/spiders/shop.py | 4a6f840433f0dd2f6da216f1748df92eb095dc3c | [] | no_license | beautifulmistake/GuoMeiMall | 7d29c67bb153f7f9b5924e9277fc13580b483fad | 9537c926c2891905732972fc061f8a3da2af6439 | refs/heads/master | 2020-05-31T06:43:41.858288 | 2019-06-11T08:45:30 | 2019-06-11T08:45:30 | 190,148,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,851 | py | """
此爬虫是根据关键字采集国美的店铺信息
响应的数据为 json 格式
"""
import json
import os
import scrapy
from scrapy.exceptions import CloseSpider
from scrapy.signals import spider_closed
from scrapy.spidermiddlewares.httperror import HttpError
from scrapy_redis.spiders import RedisSpider
from twisted.internet.error import TCPTimedOutError, DNSLookupError
from GuoMei.items import GuoMeiShop
class GuoMeiShopSpider(RedisSpider):
    # spider name
    name = 'shopSpider'
    # start command (push to this redis key to kick off the spider)
    redis_key = 'GuoMeiShopSpider:items'
    # custom settings
custom_settings = dict(
ITEM_PIPELINES={
'GuoMei.pipelines.ShopInfoPipeline': 300,
}
)
def __init__(self, settings):
super(GuoMeiShopSpider, self).__init__()
        # list of keyword (task) files
        self.keyword_file_list = os.listdir(settings.get("KEYWORD_PATH"))
        # shop search URL: 10 ----> results per page, {page} -----> page number
self.shop_url = 'https://apis.gome.com.cn/p/mall/10/{page}/{keyword}?from=search'
# headers
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/'
'537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36',
# '': '',
}
def parse_err(self, failure):
"""
        Error-handling callback for failed requests.
:param failure:
:return:
"""
if failure.check(TimeoutError, TCPTimedOutError, DNSLookupError):
            # the failed request
            request = failure.request
            # push the failed request's URL back onto the request queue (redis stores plain values, not Request objects)
            self.server.rpush(self.redis_key, request.url)
        if failure.check(HttpError):
            # the response
            response = failure.value.response
            # push the failed URL back onto the request queue
            self.server.rpush(self.redis_key, response.url)
return
def start_requests(self):
"""
        Iterate over the keyword files and generate the initial requests.
:return:
"""
if not self.keyword_file_list:
            # raise an exception and close the spider
            raise CloseSpider('keyword files are required')
for keyword_file in self.keyword_file_list:
            # build the path of each keyword file
file_path = os.path.join(self.settings.get("KEYWORD_PATH"), keyword_file)
            # read the file
with open(file_path, 'r', encoding='utf-8') as f:
for keyword in f.readlines():
                    # strip surrounding whitespace from the keyword
keyword = keyword.strip()
                    # issue the request
yield scrapy.Request(url=self.shop_url.format(page=str(1), keyword=keyword), callback=self.parse,
errback=self.parse_err, meta={'keyword': keyword})
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
        # settings
        settings = crawler.settings
        # spider instance
        spider = super(GuoMeiShopSpider, cls).from_crawler(crawler, settings, *args, **kwargs)
        # connect the spider-closed signal
        crawler.signals.connect(spider.spider_closed, signal=spider_closed)
        # return the spider
return spider
def spider_closed(self, spider):
"""
        Custom actions to run when the spider closes.
:param spider:
:return:
"""
self.logger.info('Spider closed : %s', spider.name)
        # depending on the use case, enable the two file operations below
# spider.record_file.write("]")
# spider.record_file.close()
def parse(self, response):
if response.status == 200:
print(response.text)
            # keyword
keyword = response.meta['keyword']
# json ---> dict
res = json.loads(response.text, encoding='utf-8')
            # total number of pages
            totalPage = res.get('totalPage')
            # current page number
            currentPage = res.get('currentPage')
            # total number of search results
            totalCount = res.get('totalCount')
            # list of shop records
shopList = res.get('shopList')
if shopList:
for shop in shopList:
# item
item = GuoMeiShop()
                    # keyword
                    item['keyword'] = keyword
                    # total shop count
                    item['totalCount'] = totalCount
                    # shop info
item['shop_info'] = shop
yield item
if int(currentPage) < int(totalPage):
                # next page
yield scrapy.Request(url=self.shop_url.format(page=int(currentPage) + 1, keyword=keyword),
callback=self.parse, errback=self.parse_err, meta=response.meta)
| [
"[email protected]"
] | |
b92f9667ab1f08bc8ef15d2340b5e322bfb3e78d | 522edf49c560a9d8857888d81500ecfd9d106933 | /WayneBlog/WayneBlog/autocomplete.py | e1968bfa4787e65f20e6208c09f6649e4e1b27c5 | [] | no_license | WayneChen1994/WayneBlog | d5590b48151ef6ffef5b6e461069bbd0113aefc6 | 79355c11976fdf84e5dc35bda628a2c4756676d0 | refs/heads/master | 2020-04-29T07:29:01.977198 | 2019-04-19T15:22:56 | 2019-04-19T15:22:56 | 175,945,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | from dal import autocomplete
from blog.models import Category, Tag
class CategoryAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if not self.request.user.is_authenticated():
return Category.objects.none()
qs = Category.objects.filter(owner=self.request.user)
if self.q:
qs = qs.filter(name__istartswith=self.q)
return qs
class TagAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if not self.request.user.is_authenticated():
return Tag.objects.none()
qs = Tag.objects.filter(owner=self.request.user)
if self.q:
qs = qs.filter(name__istartswith=self.q)
return qs
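# URL wiring sketch (illustrative; the patterns below are assumptions, not taken from this project's urls.py):
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^category-autocomplete/$', CategoryAutocomplete.as_view(), name='category-autocomplete'),
#     url(r'^tag-autocomplete/$', TagAutocomplete.as_view(), name='tag-autocomplete'),
# ]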
| [
"[email protected]"
] | |
40cfecdd4710615953024f1d17a719922c50da7a | 3365e4d4fc67bbefe4e8c755af289c535437c6f4 | /.history/src/core/app/GUI_20170808112214.py | a41ca5b589222a9d9f1ef86e53b552cdda38bb1b | [] | no_license | kiranhegde/OncoPlotter | f3ab9cdf193e87c7be78b16501ad295ac8f7d2f1 | b79ac6aa9c6c2ca8173bc8992ba3230aa3880636 | refs/heads/master | 2021-05-21T16:23:45.087035 | 2017-09-07T01:13:16 | 2017-09-07T01:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,793 | py | #! python3
import ctypes
import os
import sys
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import (QAction, QApplication, QFileDialog, QFrame,
QMainWindow, QPushButton, QSizePolicy, QSplitter,
QTextEdit, QVBoxLayout, QWidget)
#GUI
import core.gui.mainwindow as mainwindow
#Support functions
from core.app.support_functions import import_plot_data
from core.dialogs.spider_dialog import Spider, SpiderPlotter
#Dialogs
from core.dialogs.swimmer_dialog import Swimmer, SwimmerPlotter
from core.dialogs.waterfall_dialog import Waterfall, WaterfallPlotter
image_dir = os.path.dirname(os.path.abspath('../OncoPlot'))
class MainWindow(QMainWindow, mainwindow.Ui_MainWindow):
waterfall_data_signal = QtCore.pyqtSignal(list)
def __init__(self,parent=None):
QMainWindow.__init__(self,parent)
self.setupUi(self)
self.setup_window()
def setup_window(self):
#Dialogs
self.Waterfall_Plot = WaterfallPlotter(self)
self.Waterfall = Waterfall(self)
self.Waterfall_Widget = QWidget()
self.Waterfall_Box = QVBoxLayout()
self.Waterfall_Splitter = QSplitter(QtCore.Qt.Horizontal)
self.Waterfall_Splitter.addWidget(self.Waterfall)
self.Waterfall_Splitter.addWidget(self.Waterfall_Plot)
self.Waterfall_Box.addWidget(self.Waterfall_Splitter)
self.Waterfall_Widget.setLayout(self.Waterfall_Box)
self.Spider_Widget = QWidget()
self.Spider_Box = QVBoxLayout()
self.Spider_Splitter = QSplitter(QtCore.Qt.Horizontal)
self.Spider_Plot = SpiderPlotter(self)
self.Spider = Spider(self)
self.Spider_Splitter.addWidget(self.Spider)
self.Spider_Splitter.addWidget(self.Spider_Plot)
self.Spider_Box.addWidget(self.Spider_Splitter)
self.Spider_Widget.setLayout(self.Spider_Box)
self.Swimmer_Widget = QWidget()
self.Swimmer_Box = QVBoxLayout()
self.Swimmer_Splitter = QSplitter(QtCore.Qt.Horizontal)
self.Swimmer_Plot = SwimmerPlotter(self)
self.Swimmer = Swimmer(self)
self.Swimmer_Splitter.addWidget(self.Swimmer)
self.Swimmer_Splitter.addWidget(self.Swimmer_Plot)
self.Swimmer_Box.addWidget(self.Swimmer_Splitter)
self.Swimmer_Widget.setLayout(self.Swimmer_Box)
self.stackedWidget.addWidget(self.Waterfall_Widget) #0
self.stackedWidget.addWidget(self.Spider_Widget) #1
self.stackedWidget.addWidget(self.Swimmer_Widget) #2
self.stackedWidget.hide()
#Set up toolBar
self.toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        importAction = QAction(QtGui.QIcon(os.path.join(image_dir,'images\Download.png')), 'Import data template', self)
importAction.triggered.connect(self.import_data)
importAction.setIconText("Import")
self.toolBar.addAction(importAction)
self.toolBar.addSeparator()
        dumpAction = QAction(QtGui.QIcon(os.path.join(image_dir,'images\Rubbish.png')), 'Dump data', self)
#dumpAction.triggered.connect(self.dump_data)
dumpAction.setIconText("Dump data")
self.toolBar.addAction(dumpAction)
self.toolBar.addSeparator()
self.waterfallAction = QAction(QtGui.QIcon(os.path.join(image_dir,'images\waterfall.png')), 'Waterfall plot', self)
self.waterfallAction.triggered.connect(self.launch_waterfall)
self.waterfallAction.setIconText("Waterfall")
self.waterfallAction.setEnabled(False)
        self.toolBar.addAction(self.waterfallAction)
        self.spiderAction = QAction(QtGui.QIcon(os.path.join(image_dir,'images\spider.png')), 'Spider plot', self)
        self.spiderAction.triggered.connect(self.launch_spider)
        self.spiderAction.setIconText("Spider")
        self.spiderAction.setEnabled(False)
        self.toolBar.addAction(self.spiderAction)
        self.swimmerAction = QAction(QtGui.QIcon(os.path.join(image_dir,'images\swimmer_stack.png')), 'Swimmer plot', self)
        self.swimmerAction.triggered.connect(self.launch_swimmer)
        self.swimmerAction.setIconText("Swimmer")
        self.swimmerAction.setEnabled(False)
        self.toolBar.addAction(self.swimmerAction)
self.toolBar.addSeparator()
#Signal interconnections
self.waterfall_data_signal.connect(self.Waterfall.on_waterfall_data_signal)
self.waterfall_data_signal.connect(self.Waterfall_Plot.on_waterfall_data_signal)
self.Waterfall.general_settings_signal.connect(self.Waterfall_Plot.on_general_settings_signal)
#Launch functions
def launch_waterfall(self):
self.stackedWidget.setCurrentIndex(0)
self.stackedWidget.show()
def launch_spider(self):
self.stackedWidget.setCurrentIndex(1)
self.stackedWidget.show()
def launch_swimmer(self):
self.stackedWidget.setCurrentIndex(2)
self.stackedWidget.show()
def import_data(self):
self.file_path = QFileDialog.getOpenFileName(self,"Select Data Template", "C:\\")[0]
if self.file_path == '':
pass
else:
self.waterfall_data = import_plot_data(self.file_path)
self.waterfall_data_signal.emit(self.waterfall_data)
waterfallAction.setEnabled(True)
spiderAction.setEnabled(True)
swimmerAction.setEnabled(True)
def main():
myappid = u'OncoPlotter_V1.0'
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
app = QApplication(sys.argv)
app.setApplicationName('OncoPlotter')
app.setStyle("plastique")
app.setStyleSheet("QSplitter::handle { background-color: gray }")
mainwindow = MainWindow()
mainwindow.show()
sys.exit(app.exec_())
if __name__=='__main__':
main()
| [
"[email protected]"
] | |
4a6477389290b79cec799eb18a7fb2e6883e2a89 | 42f418a4535c269ea1fbf2a023acf9805ee72c0a | /exploration/executables/December17/parabola_configurations.py | 934f2503cab59c915d941033f4c014dc5cf8b03f | [] | no_license | yumilceh/artificialbabbling | 10e6fe2fe19edbc0263c61b2897514cbae9a1c1f | 680b720e48b787bac1c626c597fd690159edd777 | refs/heads/master | 2022-11-12T22:21:38.606407 | 2018-04-24T14:37:20 | 2018-04-24T14:37:20 | 270,961,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,850 | py | """
Created on May 17, 2017
@author: Juan Manuel Acevedo Valle
"""
from exploration.algorithm.utils.competence_funcs import comp_Moulin2013_expl as comp_func_expl
from exploration.algorithm.utils.competence_funcs import comp_Moulin2013 as comp_func_
# from exploration.models.sensorimotor.ExplautoSM import ExplautoSM as ea_SM
# from exploration.models.sensorimotor.GMM_SM import GMM_SM
from exploration.models.Constraints.ExplautoCons import ExplautoCons as ea_cons
from exploration.models.Interest.ExplautoIM import explauto_IM as ea_IM
from exploration.models.Interest.Random import Random
# from exploration.models.Interest.GMM_IM import GMM_IM
# from exploration.models.Somatomotor.GMM_SS import GMM_SS
from exploration.models.sensorimotor.IGMM_SM import GMM_SM as IGMM_SM
# from exploration.models.Somatomotor.ILGMM_SS import GMM_SS as IGMM_SS
from exploration.models.sensorimotor.trash.ILGMM_SM2 import GMM_SM as ILGMM_old
from igmm import DynamicParameter
# from exploration.algorithm.utils.competence_funcs import comp_Baraglia2015_expl as comp_func_expl
# from exploration.algorithm.utils.competence_funcs import comp_Baraglia2015 as comp_func
comp_func = comp_func_
model_class = {'igmm_sm': IGMM_SM,
'igmm_old': ILGMM_old,
'igmm_ss': IGMM_SM,
'explauto_im': ea_IM,
'explauto_im_som': ea_IM,
'explauto_cons': ea_cons,
'random': Random}
models_params_list = {'igmm_sm': [],
'igmm_ss': [],
'igmm_old': [],
'explauto_cons': [], # 'LWLR-BFGS', 'nearest_neighbor', 'WNN', 'LWLR-CMAES'
'explauto_im': [],
'explauto_im_som':[],
'random': []
}
models_params_dict = {'igmm_ss': {'min_components': 3,
'max_step_components': 10, #5
'max_components': 80, #10
'sm_step': 120,
'somato': True,
'forgetting_factor': 0.2, #OK: 0.2, 0.05
'sigma_explo_ratio': 0.},
'igmm_sm': {'min_components': 3,
'somato': False,
'max_step_components': 5,
'max_components': 20,
'sm_step': 100,
'forgetting_factor': 0.05},
'igmm_old': {'min_components': 3,
'max_step_components': 5,
'max_components': 20,
'sm_step': 100,
'forgetting_factor': 0.2},
'explauto_cons': {'model_type': 'non_parametric', 'model_conf': {'fwd': 'WNN', 'inv': 'WNN',
'k':3, 'sigma':1.,
'sigma_explo_ratio':0.1}},
'explauto_im': {'competence_func': comp_func_expl, 'model_type': 'discretized_progress'},
'explauto_im_som': {'competence_func': comp_func_expl, 'model_type': 'discretized_progress','somato':True},
'random': {'mode': 'sensor'}
}
def model_(model_key, system, competence_func=None):
if competence_func is None:
return model_class[model_key](system, *models_params_list[model_key], **models_params_dict[model_key])
else:
return model_class[model_key](system, competence_func, *models_params_list[model_key],
                                      **models_params_dict[model_key])
| [
"[email protected]"
] | |
ded8721ccec0deb754fd31df06a0b48f471fc4e1 | 7312266874e50682cf909f4b77260c9a69f13999 | /python/packages/scipy-0.6.0/scipy/sparse/setup.py | 9d54d45325a529b3cddd812510c4297bb0f15697 | [] | no_license | mbentz80/jzigbeercp | e354695b90a72c7fe3c5c7ec7d197d9cbc18d7d9 | 1a49320df3db13d0a06fddb30cf748b07e5ba5f0 | refs/heads/master | 2021-01-02T22:44:16.088783 | 2008-08-27T23:05:47 | 2008-08-27T23:05:47 | 40,231 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | #!/usr/bin/env python
from os.path import join
import sys
def configuration(parent_package='',top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('sparse',parent_package,top_path)
config.add_data_dir('tests')
# Adding a Python file as a "source" file for an extension is something of
# a hack, but it works to put it in the right place.
sources = [join('sparsetools', x) for x in
['sparsetools.py', 'sparsetools_wrap.cxx']]
config.add_extension('_sparsetools',
sources=sources,
include_dirs=['sparsetools'],
)
## sparsetools_i_file = config.paths(join('sparsetools','sparsetools.i'))[0]
## def sparsetools_i(ext, build_dir):
## return sparsetools_i_file
## config.add_extension('_sparsetools',
## sources= [sparsetools_i_file],
## include_dirs=['sparsetools'],
## depends = [join('sparsetools', x) for x in
## ['sparsetools.i', 'sparsetools.h']]
## )
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| [
"[email protected]"
] | |
96f690a53f749d69fdfe0fbb6ef5e7280b753b9e | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/util/bin/format/coff/CoffSectionHeader3.pyi | 2d208f75241a47f7c45a55cecba6be313298d43c | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,087 | pyi | from typing import List
import ghidra.app.util.bin
import ghidra.app.util.bin.format.coff
import ghidra.program.model.address
import ghidra.program.model.data
import ghidra.program.model.lang
import java.io
import java.lang
class CoffSectionHeader3(ghidra.app.util.bin.format.coff.CoffSectionHeader):
"""
A 0x2c byte COFF section header
"""
def equals(self, __a0: object) -> bool: ...
@overload
@staticmethod
def getAddress(language: ghidra.program.model.lang.Language, offset: long, section: ghidra.app.util.bin.format.coff.CoffSectionHeader) -> ghidra.program.model.address.Address:
"""
Convert address offset to an Address object. The default data space (defined by pspec)
will be used if section is null or corresponds to a data section. The language default
space (defined by slaspec) will be used for all non-data sections. If pspec does not
specify a default data space, the default language space is used.
@param language
@param offset address offset (byte offset assumed if section is null or is not explicitly
byte aligned, otherwise word offset assumed).
@param section section which contains the specified offset or null (data space assumed)
@return address object
"""
...
@overload
@staticmethod
def getAddress(language: ghidra.program.model.lang.Language, offset: long, space: ghidra.program.model.address.AddressSpace) -> ghidra.program.model.address.Address:
"""
Convert address offset to an Address in the specified space (defined by pspec).
If pspec does not specify a default data space, the default language space is used.
@param language
@param offset address offset (word offset assumed).
@param space address space
@return address object
"""
...
def getClass(self) -> java.lang.Class: ...
def getFlags(self) -> int:
"""
Returns the flags for this section.
@return the flags for this section
"""
...
def getLineNumberCount(self) -> int:
"""
Returns the number of line number entries for this section.
@return the number of line number entries for this section
"""
...
def getLineNumbers(self) -> List[ghidra.app.util.bin.format.coff.CoffLineNumber]: ...
def getName(self) -> unicode:
"""
Returns the section name.
The section name will never be more than eight characters.
@return the section name
"""
...
def getPage(self) -> int: ...
@overload
def getPhysicalAddress(self) -> int:
"""
Returns the physical address offset.
This is the address at which the section
should be loaded into memory and reflects a addressable word offset.
For linked executables, this is the absolute
address within the program space.
For unlinked objects, this address is relative
to the object's address space (i.e. the first section
is always at offset zero).
@return the physical address
"""
...
@overload
def getPhysicalAddress(self, language: ghidra.program.model.lang.Language) -> ghidra.program.model.address.Address:
"""
Returns the physical address.
This is the address at which the section
should be loaded into memory.
For linked executables, this is the absolute
address within the program space.
For unlinked objects, this address is relative
to the object's address space (i.e. the first section
is always at offset zero).
@return the physical address
"""
...
def getPointerToLineNumbers(self) -> int:
"""
Returns the file offset to the line numbers for this section.
@return the file offset to the line numbers for this section
"""
...
def getPointerToRawData(self) -> int:
"""
Returns the file offset to the section data.
@return the file offset to the section data
"""
...
def getPointerToRelocations(self) -> int:
"""
Returns the file offset to the relocations for this section.
@return the file offset to the relocations for this section
"""
...
def getRawDataStream(self, provider: ghidra.app.util.bin.ByteProvider, language: ghidra.program.model.lang.Language) -> java.io.InputStream:
"""
Returns an input stream that will supply the bytes
for this section.
@return the input stream
@throws IOException if an I/O error occurs
"""
...
def getRelocationCount(self) -> int:
"""
Returns the number of relocations for this section.
@return the number of relocations for this section
"""
...
def getRelocations(self) -> List[ghidra.app.util.bin.format.coff.CoffRelocation]: ...
def getReserved(self) -> int: ...
def getSize(self, language: ghidra.program.model.lang.Language) -> int:
"""
Returns the number of bytes of data stored in the file for this section.
NOTE: This value does not strictly indicate size in bytes.
For word-oriented machines, this value is represents
size in words.
@return the number of bytes of data stored in the file for this section
"""
...
def getVirtualAddress(self) -> int:
"""
Returns the virtual address.
This value is always the same as s_paddr.
@return the virtual address
"""
...
def hashCode(self) -> int: ...
def isAllocated(self) -> bool: ...
def isData(self) -> bool: ...
def isExecutable(self) -> bool: ...
def isExplicitlyByteAligned(self) -> bool:
"""
Returns true if this section is byte oriented and aligned and should assume
an addressable unit size of 1.
@return true if byte aligned, false if word aligned
"""
...
def isGroup(self) -> bool: ...
def isInitializedData(self) -> bool: ...
def isProcessedBytes(self, language: ghidra.program.model.lang.Language) -> bool: ...
def isReadable(self) -> bool: ...
def isUninitializedData(self) -> bool: ...
def isWritable(self) -> bool: ...
def move(self, offset: int) -> None:
"""
Adds offset to the physical address; this must be performed before
relocations in order to achieve the proper result.
@param offset the offset to add to the physical address
"""
...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toDataType(self) -> ghidra.program.model.data.DataType: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"[email protected]"
] | |
523591afd5c27c3429d387457b5d75979d089531 | 824f831ce0921b3e364060710c9e531f53e52227 | /Leetcode/Dynamic_Programming/LC-053. Maximum Subarray.py | e6b7d456cfe91003151f2a49f92a98deefd53cc7 | [] | no_license | adityakverma/Interview_Prepration | e854ff92c10d05bc2c82566ea797d2ce088de00a | d08a7f728c53943e9a27c33f8e4249633a69d1a6 | refs/heads/master | 2020-04-19T19:36:06.527353 | 2019-06-15T23:02:30 | 2019-06-15T23:02:30 | 168,392,921 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,000 | py | # Tags: Array, Dynamic Programming, Divide & Conquer, Microsoft, LinkedIn
# ========================================================================
# Given an integer array nums, find the contiguous subarray
# (containing at least one number) which has the largest sum and
# return its sum.
# Example:
# Input: [-2,1,-3,4,-1,2,1,-5,4],
# Output: 6; Explanation: [4,-1,2,1] has the largest sum = 6.
# ALSO check LC 152 - Same concept
class Solution():
def maxSubArray(self, nums):
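        # Kadane-style DP done in place: nums[i] is overwritten with the best
        # subarray sum ending at index i, so the answer is the maximum of nums.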
for i in range(1, len(nums)):
nums[i] = max(nums[i - 1]+ nums[i], nums[i])
return max(nums)
# Using Dynamic Programming. O(n) Space : Looks like Kadane
def maxSubArray_DP(self, nums): # OKKK ....
if not nums:
return None
dp = [0] * len(nums)
res = dp[0] = nums[0]
for i in xrange(1, len(nums)):
dp[i] = max(dp[i - 1] + nums[i], nums[i]) # we need to find that contiguous portion of an array where the sum is maximum.
#res = max(res, dp[i])
#print dp
return max(dp)
# def maxProfit(self, prices):
# """
# :type prices: List[int]
# :rtype: int
# """
# if not prices:
# return 0
#
# curSum = maxSum = 0
# for i in range(1, len(prices)):
# curSum = max(0, curSum + prices[i] - prices[i - 1])
# maxSum = max(maxSum, curSum)
#
# return maxSum
if __name__ == '__main__':
nums = [-2,1,-3,4,-1,2,1,-5,4]
nums1 = [-2,-3,6,-8,2,-9]
s = Solution()
#print s.maxSubArray(nums)
print s.maxSubArray_DP(nums)
#print s.maxProfit(nums)
# Follow-up I was asked in an interview: "How would we solve this given an endless incoming stream of numbers?" Ideas, anybody?
# https://leetcode.com/problems/maximum-subarray/discuss/179894/Follow-up-question-in-Intervierw
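#
# One possible sketch (an assumption of mine, not the thread's answer): Kadane's
# algorithm only needs the running sum ending at the newest element, so it adapts
# naturally to a stream. The class and method names below are illustrative only.
#
# class StreamingMaxSubarray(object):
#     def __init__(self):
#         self.cur = None   # best sum of a subarray ending at the newest element
#         self.best = None  # best sum seen so far
#
#     def add(self, x):
#         # restart at x when the previous running sum would only drag it down
#         self.cur = x if self.cur is None or self.cur < 0 else self.cur + x
#         self.best = self.cur if self.best is None else max(self.best, self.cur)
#         return self.best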
| [
"[email protected]"
] | |
dd539a1c149d1e02e104ac2f58495f0374684f81 | bdeb707894a4647cf46ab136e2ed5e7752094897 | /hive/server/condenser_api/tags.py | a75c6be3d711cd4314f922d71ff246c9fa559459 | [
"MIT"
] | permissive | Jolly-Pirate/hivemind | e5f636070b1e51bcd047678b758d5617ad4d83ec | 2bd91ece6c32f355bca40a156f2a5dc3d4d882bb | refs/heads/master | 2020-05-30T09:01:22.910947 | 2019-04-11T19:25:48 | 2019-04-11T19:25:48 | 189,629,870 | 0 | 0 | MIT | 2019-05-31T17:05:18 | 2019-05-31T17:01:27 | Python | UTF-8 | Python | false | false | 1,902 | py | """condenser_api trending tag fetching methods"""
from aiocache import cached
from hive.server.condenser_api.common import (return_error_info, valid_tag, valid_limit)
@return_error_info
@cached(ttl=7200, timeout=1200)
async def get_top_trending_tags_summary(context):
"""Get top 50 trending tags among pending posts."""
# Same results, more overhead:
#return [tag['name'] for tag in await get_trending_tags('', 50)]
sql = """
SELECT category
FROM hive_posts_cache
WHERE is_paidout = '0'
GROUP BY category
ORDER BY SUM(payout) DESC
LIMIT 50
"""
return await context['db'].query_col(sql)
@return_error_info
@cached(ttl=3600, timeout=1200)
async def get_trending_tags(context, start_tag: str = '', limit: int = 250):
"""Get top 250 trending tags among pending posts, with stats."""
limit = valid_limit(limit, ubound=250)
start_tag = valid_tag(start_tag or '', allow_empty=True)
if start_tag:
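        # Seek-based paging: keep only tags whose total pending payout is at or
        # below that of start_tag, so the ranking resumes where the caller left off.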
seek = """
HAVING SUM(payout) <= (
SELECT SUM(payout)
FROM hive_posts_cache
WHERE is_paidout = '0'
AND category = :start_tag)
"""
else:
seek = ''
sql = """
SELECT category,
COUNT(*) AS total_posts,
SUM(CASE WHEN depth = 0 THEN 1 ELSE 0 END) AS top_posts,
SUM(payout) AS total_payouts
FROM hive_posts_cache
WHERE is_paidout = '0'
GROUP BY category %s
ORDER BY SUM(payout) DESC
LIMIT :limit
""" % seek
out = []
for row in await context['db'].query_all(sql, limit=limit, start_tag=start_tag):
out.append({
'name': row['category'],
'comments': row['total_posts'] - row['top_posts'],
'top_posts': row['top_posts'],
'total_payouts': "%.3f SBD" % row['total_payouts']})
return out
| [
"[email protected]"
] | |
f27caf7428d9cbda9fc1a6c9fffb03dc2afaec0b | 83cf4aedab8f8b54753dfca1122346a88faa3c05 | /prysm/__init__.py | 7028af45080d8151982adc1e43a33f7bf9e984b8 | [
"MIT"
] | permissive | tealtortoise/prysm | 073c2a6bea10c390fb7be1d708ecab666a91bdb1 | 3c17cb7b6049a36a1f8b6a0035c216ca1078aee1 | refs/heads/master | 2022-05-08T05:30:21.819821 | 2019-04-12T11:49:48 | 2019-04-12T11:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | """prysm, a python optics module."""
from pkg_resources import get_distribution
from prysm.conf import config
from prysm.convolution import Convolvable
from prysm.detector import Detector, OLPF, PixelAperture
from prysm.pupil import Pupil
from prysm.psf import PSF, AiryDisk
from prysm.otf import MTF
from prysm.interferogram import Interferogram
from prysm.geometry import (
circle,
truecircle,
gaussian,
rotated_ellipse,
square,
regular_polygon,
pentagon,
hexagon,
heptagon,
octagon,
nonagon,
decagon,
hendecagon,
dodecagon,
trisdecagon
)
from prysm.objects import (
Slit,
Pinhole,
SiemensStar,
TiltedSquare,
SlantedEdge,
)
from prysm.zernike import FringeZernike, NollZernike, zernikefit
from prysm.sample_data import sample_files
__all__ = [
'config',
'Detector',
'OLPF',
'PixelAperture',
'Pupil',
'FringeZernike',
'NollZernike',
'zernikefit',
'Interferogram',
'PSF',
'AiryDisk',
'MTF',
'gaussian',
'rotated_ellipse',
'regular_polygon',
'square',
'pentagon',
'hexagon',
'heptagon',
'octagon',
'nonagon',
'decagon',
'hendecagon',
'dodecagon',
'trisdecagon',
'Slit',
'Pinhole',
'SiemensStar',
'TiltedSquare',
'SlantedEdge',
'Convolvable',
'circle',
'truecircle',
'sample_files',
]
__version__ = get_distribution('prysm').version
| [
"[email protected]"
] | |
c6ddd34116e222c160be95243b8b1df1eb4c67b3 | 411d9c64d2f2142f225582f2b4af1280310426f6 | /sk/logistic.py | b193e93f1811528873b0dbd74620a83c1a463b8f | [] | no_license | 631068264/learn-sktf | 5a0dfafb898acda83a80dc303b6d6d56e30e7cab | 4ba36c89003fca6797025319e81fd9863fbd05b1 | refs/heads/master | 2022-10-15T03:29:38.709720 | 2022-09-24T12:56:41 | 2022-09-24T12:56:41 | 133,602,172 | 0 | 0 | null | 2022-09-24T12:57:23 | 2018-05-16T02:57:01 | Python | UTF-8 | Python | false | false | 1,522 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author = 'wyx'
@time = 2018/5/13 18:27
@annotation = ''
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
iris = datasets.load_iris()
# petal width
X = iris["data"][:, 3:]
# 1 if Iris-Virginica, else 0
y = (iris["target"] == 2).astype(np.int)
from sklearn.linear_model import LogisticRegression
"""
LogisticRegression
A Softmax regression classifier predicts only one class at a time (i.e. it is
multiclass, not multioutput), so it should only be used with mutually exclusive
classes such as different species of plants; it cannot recognise several people
in a single picture.
"""
if False:
log_reg = LogisticRegression()
log_reg.fit(X, y)
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_proba = log_reg.predict_proba(X_new)
plt.plot(X_new, y_proba[:, 1], "g-", label="Iris-Virginica")
plt.plot(X_new, y_proba[:, 0], "b--", label="Not Iris-Virginica")
plt.legend()
plt.show()
"""
Multinomial (Softmax) regression
Scikit-Learn's LogisticRegression defaults to one-vs-rest when trained on more
than two classes, but setting the multi_class hyperparameter to "multinomial"
switches it to Softmax regression. You must also choose a solver that supports
Softmax regression, e.g. "lbfgs" or the stochastic average gradient "sag" solver.
"""
X = iris["data"][:, (2, 3)]
y = iris["target"]
softmax_reg = LogisticRegression(multi_class="multinomial", solver="lbfgs", C=10)
softmax_reg.fit(X, y)
print(softmax_reg.predict([[5, 2]]))
print(softmax_reg.predict_proba([[5, 2]]))
| [
"[email protected]"
] | |
e812462a5f03c13652d8da807578cfd1734c6036 | 316bbaec3cd0e1d11aa2a6df417714f568b71b8d | /dm6/wrappers/macs2/.snakemake.pzzkm37x.wrapper.py | 29b280403b9d233af358914288300621a7d9ec98 | [] | no_license | HussainAther/optimizepeakcallers | dfe34574943ac56d06dc8a0463435585ce75629f | 5f40a2700bbad9f5d6f93676a4c377ea2da214f4 | refs/heads/master | 2020-03-08T07:20:00.949265 | 2018-04-09T17:36:16 | 2018-04-09T17:36:16 | 127,991,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,970 | py |
######## Snakemake header ########
import sys; sys.path.insert(0, "/home/athersh/miniconda3/envs/snakemake/lib/python3.5/site-packages"); import pickle; snakemake = pickle.loads(b'\x80\x03csnakemake.script\nSnakemake\nq\x00)\x81q\x01}q\x02(X\x04\x00\x00\x00ruleq\x03X\x05\x00\x00\x00macs2q\x04X\x06\x00\x00\x00paramsq\x05csnakemake.io\nParams\nq\x06)\x81q\x07X$\x00\x00\x00peak_out/macs2/el41_bg3_rump-10c3_R1q\x08a}q\t(X\x06\x00\x00\x00prefixq\nh\x08X\x06\x00\x00\x00_namesq\x0b}q\x0ch\nK\x00N\x86q\rsubX\x03\x00\x00\x00logq\x0ecsnakemake.io\nLog\nq\x0f)\x81q\x10}q\x11h\x0b}q\x12sbX\t\x00\x00\x00wildcardsq\x13csnakemake.io\nWildcards\nq\x14)\x81q\x15X\x15\x00\x00\x00el41_bg3_rump-10c3_R1q\x16a}q\x17(h\x0b}q\x18X\x06\x00\x00\x00sampleq\x19K\x00N\x86q\x1asX\x06\x00\x00\x00sampleq\x1bh\x16ubX\t\x00\x00\x00resourcesq\x1ccsnakemake.io\nResources\nq\x1d)\x81q\x1e(K\x01K\x01e}q\x1f(X\x06\x00\x00\x00_nodesq K\x01h\x0b}q!(h K\x00N\x86q"X\x06\x00\x00\x00_coresq#K\x01N\x86q$uh#K\x01ubX\x06\x00\x00\x00outputq%csnakemake.io\nOutputFiles\nq&)\x81q\'(X>\x00\x00\x00peak_out/macs2/el41_bg3_rump-10c3_R1/el41_bg3_rump-10c3_R1.bedq(XK\x00\x00\x00peak_out/macs2/el41_bg3_rump-10c3_R1/el41_bg3_rump-10c3_R1_peaks.narrowPeakq)e}q*(X\x03\x00\x00\x00bedq+h(X\n\x00\x00\x00narrowPeakq,h)h\x0b}q-(h+K\x00N\x86q.h,K\x01N\x86q/uubX\x05\x00\x00\x00inputq0csnakemake.io\nInputFiles\nq1)\x81q2(X\x1b\x00\x00\x00dedup/el37_bg3_input_R1.bamq3X\x1f\x00\x00\x00dedup/el41_bg3_rump-10c3_R1.bamq4e}q5(X\x03\x00\x00\x00inpq6h3h\x0b}q7(h6K\x00N\x86q8X\x02\x00\x00\x00ipq9K\x01N\x86q:uh9h4ubX\x06\x00\x00\x00configq;}q<X\x07\x00\x00\x00threadsq=K\x01ub.')
######## Original script #########
from snakemake.shell import shell
shell(
'macs2 '
'callpeak '
'-c {snakemake.input.inp} '
'-t {snakemake.input.ip} '
'--bdg --SPMR '
'-n {snakemake.wildcards.sample} '
'--outdir {snakemake.params.prefix}'
)
shell('Rscript {snakemake.params.prefix}/{snakemake.wildcards.sample}_model.r')
| [
"[email protected]"
] | |
e12e3975e4e78c3b035185b63e0267d080e2e194 | 366c30997f60ed5ec19bee9d61c1c324282fe2bb | /deb/openmediavault/usr/sbin/omv-mkaptidx | b4820779bddb8f05d68daeb91a1c9571cb97b67d | [] | no_license | maniacs-ops/openmediavault | 9a72704b8e30d34853c991cb68fb055b767e7c6e | a1c26bdf269eb996405ce6de72211a051719d9e7 | refs/heads/master | 2021-01-11T17:28:04.571883 | 2017-01-19T22:06:35 | 2017-01-19T22:06:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,989 | #!/usr/bin/env python3
#
# This file is part of OpenMediaVault.
#
# @license http://www.gnu.org/licenses/gpl.html GPL Version 3
# @author Volker Theile <[email protected]>
# @copyright Copyright (c) 2009-2017 Volker Theile
#
# OpenMediaVault is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# OpenMediaVault is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenMediaVault. If not, see <http://www.gnu.org/licenses/>.
# The following error might happen from time to time.
#
# Traceback (most recent call last):
# File "/usr/sbin/omv-mkaptidx", line 27, in <module>
# import openmediavault
# EOFError: EOF read where not expected
#
# To analyse the error execute:
# python3 -vc 'import openmediavault'
#
# To fix this error simply execute the following command:
# rm -f /usr/lib/python3/dist-packages/__pycache__/openmediavault.cpython-32.pyc
import sys
import apt
import apt_pkg
import json
import re
import openmediavault as omv
pi = omv.ProductInfo()
class OpenMediaVaultPluginsFilter(apt.cache.Filter):
def apply(self, pkg):
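        # Keep only packages named '<product>-<plugin>' (e.g. openmediavault-*),
        # and skip the keyring package, which is not a plugin.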
m = re.match(r"^%s-(\S+)$" % pi.package_name, pkg.name)
if not m:
return False
if m.group(1) == "keyring":
return False
return True
def get_extended_description(raw_description):
"""
Return the extended description according to the Debian policy
(Chapter 5.6.13).
See http://www.debian.org/doc/debian-policy/ch-controlfields.html
for more information.
"""
parts = raw_description.partition("\n");
lines = parts[2].split("\n");
for i, line in enumerate(lines):
lines[i] = line.strip();
if lines[i] == ".":
lines[i] = "\n"
return "\n".join(lines)
cache = apt.cache.Cache()
# Create the '/var/lib/openmediavault/apt/upgradeindex.json' file.
print("Creating index of upgradeable packages ...")
data = []
cache.upgrade(True)
for pkg in cache.get_changes():
if pkg.candidate is None:
continue
data.append({
"name": pkg.name,
"oldversion": pkg.installed.version
if pkg.is_installed and pkg.installed is not None
else "",
"repository": "%s/%s" % (pkg.candidate.origins[0].label,
pkg.candidate.origins[0].archive)
if pkg.candidate.origins is not None
else "",
"package": pkg.candidate.record.get("Package"),
"source": pkg.candidate.source_name,
"sourceversion": pkg.candidate.source_version,
"version": pkg.candidate.version,
"installedsize": pkg.candidate.size,
"maintainer": pkg.candidate.record.get("Maintainer", ""),
"architecture": pkg.candidate.architecture,
"depends": pkg.candidate.record.get("Depends", ""),
"suggests": pkg.candidate.record.get("Suggests", ""),
"conflicts": pkg.candidate.record.get("Conflicts", ""),
"breaks": pkg.candidate.record.get("Breaks", ""),
"abstract": pkg.candidate.summary, # Deprecated
"summary": pkg.candidate.summary,
"description": pkg.candidate.record.get("Description", ""),
"extendeddescription": get_extended_description(
pkg.candidate.raw_description),
"homepage": pkg.candidate.homepage,
"descriptionmd5": pkg.candidate.record.get("Description-md5", ""),
"multiarch": pkg.candidate.record.get("Multi-Arch", ""),
"predepends": pkg.candidate.record.get("Pre-Depends", ""),
"section": pkg.candidate.section,
"priority": pkg.candidate.priority,
"filename": pkg.candidate.filename,
"size": pkg.candidate.size,
"md5sum": pkg.candidate.md5,
"sha1": pkg.candidate.sha1,
"sha256": pkg.candidate.sha256,
"uri": pkg.candidate.uri,
"uris": pkg.candidate.uris
})
with open(omv.getenv('OMV_APT_UPGRADE_INDEX_FILE',
'/var/lib/openmediavault/apt/upgradeindex.json'), 'w') as outfile:
json.dump(data, outfile, sort_keys=True, indent=4)
# Create the '/var/lib/openmediavault/apt/pluginsindex.json' file.
print("Creating index of %s plugins ..." % pi.name)
data = []
cache = apt.cache.Cache()
fcache = apt.cache.FilteredCache(cache)
fcache.set_filter(OpenMediaVaultPluginsFilter())
for pkg in fcache:
if pkg.candidate is None:
continue
data.append({
"name": pkg.name,
"repository": "%s/%s" % (pkg.candidate.origins[0].label,
pkg.candidate.origins[0].archive)
if pkg.candidate.origins is not None
else "",
"package": pkg.candidate.record.get("Package"),
"version": pkg.candidate.version,
"installedsize": pkg.candidate.size,
"maintainer": pkg.candidate.record.get("Maintainer", ""),
"architecture": pkg.candidate.architecture,
"depends": pkg.candidate.record.get("Depends", ""),
"suggests": pkg.candidate.record.get("Suggests", ""),
"conflicts": pkg.candidate.record.get("Conflicts", ""),
"breaks": pkg.candidate.record.get("Breaks", ""),
"abstract": pkg.candidate.summary, # Deprecated
"summary": pkg.candidate.summary,
"description": pkg.candidate.record.get("Description", ""),
"extendeddescription": get_extended_description(
pkg.candidate.raw_description),
"homepage": pkg.candidate.homepage,
"descriptionmd5": pkg.candidate.record.get("Description-md5", ""),
"multiarch": pkg.candidate.record.get("Multi-Arch", ""),
"predepends": pkg.candidate.record.get("Pre-Depends", ""),
"section": pkg.candidate.section,
"pluginsection": pkg.candidate.record.get("Plugin-Section", ""),
"priority": pkg.candidate.priority,
"filename": pkg.candidate.filename,
"size": pkg.candidate.size,
"md5sum": pkg.candidate.md5,
"sha1": pkg.candidate.sha1,
"sha256": pkg.candidate.sha256,
"installed": pkg.is_installed
})
with open(omv.getenv('OMV_APT_PLUGINS_INDEX_FILE',
'/var/lib/openmediavault/apt/pluginsindex.json'), 'w') as outfile:
json.dump(data, outfile, sort_keys=True, indent=4)
sys.exit(0)
| [
"[email protected]"
] | ||
fb3357f74e50b16b2e9ff2a5c707a1cd76f60390 | d9c95cd0efad0788bf17672f6a4ec3b29cfd2e86 | /disturbance/migrations/0102_sitetransferapiarysite_customer_selected.py | 244cec5a7829e05fb0fa9710029b020299d7fdb4 | [
"Apache-2.0"
] | permissive | Djandwich/disturbance | cb1d25701b23414cd91e3ac5b0207618cd03a7e5 | b1ba1404b9ca7c941891ea42c00b9ff9bcc41237 | refs/heads/master | 2023-05-05T19:52:36.124923 | 2021-06-03T06:37:53 | 2021-06-03T06:37:53 | 259,816,629 | 1 | 1 | NOASSERTION | 2021-06-03T09:46:46 | 2020-04-29T03:39:33 | Python | UTF-8 | Python | false | false | 483 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-07-08 02:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('disturbance', '0101_auto_20200708_1021'),
]
operations = [
migrations.AddField(
model_name='sitetransferapiarysite',
name='customer_selected',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
] | |
bb2be937acf8912948c17324039d31f3142c2686 | 10fbe5526e5f0b8588b65f70f088cd86b6e9afbe | /rqwywo/migrations/0007_mahcgzunnb.py | b7a865d124c035fd8b4fe42e8e9ea49a6e5daaa5 | [] | no_license | MarkusH/django-migrations-benchmark | eb4b2312bb30a5a5d2abf25e95eca8f714162056 | e2bd24755389668b34b87d254ec8ac63725dc56e | refs/heads/master | 2016-09-05T15:36:45.250134 | 2015-03-31T23:44:28 | 2015-03-31T23:44:28 | 31,168,231 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('digmcd', '0014_auto_20150218_1625'),
('rqwywo', '0006_auto_20150218_1623'),
]
operations = [
migrations.CreateModel(
name='Mahcgzunnb',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('jytvvd', models.ForeignKey(null=True, related_name='+', to='digmcd.Untgafvod')),
],
),
]
| [
"[email protected]"
] | |
05a061fd125b2b45d56a164419cb1734443f7bcd | 21b4585de4a0eacdb0d1e488dfae53684bb6564e | /111. Minimum Depth of Binary Tree.py | 8df278cffa42331ca4af1a0e9c38c21b64172b2d | [] | no_license | gaosq0604/LeetCode-in-Python | de8d0cec3ba349d6a6462f66286fb3ddda970bae | 57ec95779a4109008dbd32e325cb407fcbfe5a52 | refs/heads/master | 2021-09-15T23:14:14.565865 | 2018-06-12T16:30:40 | 2018-06-12T16:30:40 | 113,881,474 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def minDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
d = self.minDepth(root.left), self.minDepth(root.right)
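        # A missing child has depth 0, which is falsy, so `min(d) or max(d)` falls
        # back to the existing subtree's depth; otherwise the shallower side wins.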
return 1 + (min(d) or max(d)) | [
"[email protected]"
] | |
a50d73c260c8760f213ba7dc25fe02ae4536540b | 350f0721f490a7fd9ab532b666bed72108a2be81 | /src/classes/list.py | 2ad41e74260ae9b201ff5753e06a981494e00e68 | [
"MIT"
] | permissive | 810Teams/mal-chart-maker | bced08ee223e8cda66de52fdbf6d172e5352563f | 7a5570d2bc7cd23e1dee6631e62a73c959587c04 | refs/heads/main | 2023-05-25T00:52:13.596168 | 2023-05-13T10:22:27 | 2023-05-13T10:22:27 | 349,377,549 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,092 | py | """
`data.py`
"""
from src.utils import error
from src.classes.entry import Entry, Anime, Manga
from math import ceil, floor, sqrt
class List:
""" User anime/manga list class """
def __init__(
self,
data: list=list(),
include_current: bool=False,
include_onhold: bool=False,
include_dropped: bool=False,
include_planned: bool=False,
tag_rules: list=None
):
""" Constructor """
self.data = data
self.include_current = include_current
self.include_onhold = include_onhold
self.include_dropped = include_dropped
self.include_planned = include_planned
self.tag_rules = tag_rules
def add_entry(self, entry: Entry) -> None:
""" Add anime/manga object to the anime/manga list by object """
self.data.append(entry)
def get_entry(self, entry_id: str) -> Entry:
""" Get anime/manga object from the anime/manga list by anime/manga ID """
for i in range(len(self.data)):
if isinstance(self.data[i], Anime) and self.data[i].series_animedb_id == entry_id:
return self.data[i]
elif isinstance(self.data[i], Manga) and self.data[i].manga_mangadb_id == entry_id:
return self.data[i]
return None
def delete_entry(self, entry_id: str) -> Entry:
""" Delete anime/manga object from the anime/manga list by anime/manga ID """
for i in range(len(self.data)):
if isinstance(self.data[i], Anime) and self.data[i].series_animedb_id == entry_id:
return self.data.pop(i)
elif isinstance(self.data[i], Manga) and self.data[i].manga_mangadb_id == entry_id:
return self.data.pop(i)
return None
def count(self, key: str) -> int:
""" Count anime/manga with a specific status """
if key == 'all':
return len(self.data)
elif key.title().replace('To', 'to') in ('Watching', 'Reading', 'Completed', 'On-Hold', 'Dropped', 'Plan to Watch', 'Plan to Read'):
return len([i for i in self.data if i.my_status == key.title().replace('To', 'to')])
return 0
def get_list(self, include_unscored: bool=False) -> list:
""" Get anime/manga list """
return [
i for i in self.data
if (i.my_status != 'Watching' or self.include_current)
and (i.my_status != 'On-Hold' or self.include_onhold)
and (i.my_status != 'Dropped' or self.include_dropped)
and (i.my_status != 'Plan to Watch' or self.include_planned)
and (i.my_score != 0 or include_unscored)
]
def get_full_list(self, include_unscored: bool=False) -> list:
""" Get full anime/manga list """
return [i for i in self.data if i.my_score != 0 or include_unscored]
def get_grouped_list(
self,
include_unscored: bool=False,
group_by: str='series_type',
sort_method: str='most_common',
sort_order: str='descending',
manual_sort: list=None,
disassemble_key: str=None
) -> dict:
""" Get grouped anime/manga list """
grouped_entry_list = dict()
categories = list()
filtered_entry_list = self.get_list(include_unscored=include_unscored)
# Category Retrieval
for _ in filtered_entry_list:
if eval('_.{}'.format(group_by)) not in categories:
categories.append(eval('_.{}'.format(group_by)))
# Category Sorting
if sort_method == 'most_common':
categories.sort(
key=lambda i: [eval('j.{}'.format(group_by)) for j in filtered_entry_list].count(i),
reverse=sort_order != 'ascending'
)
elif sort_method == 'alphabetical':
categories.sort(
reverse=sort_order != 'ascending'
)
else:
error('Invalid sort_method `{}` of get_grouped_list().'.format(sort_method))
return None
# Manual Sort Override
if manual_sort != None:
old_categories = [i for i in categories]
categories = list()
for i in manual_sort:
if i in old_categories:
categories.append(i)
old_categories.remove(i)
categories += old_categories
# Packing Categories
for i in categories:
grouped_entry_list[i] = [j for j in filtered_entry_list if eval('j.{}'.format(group_by)) == i]
# Desired Data Retrieval
if disassemble_key != None:
for i in grouped_entry_list:
for j in range(len(grouped_entry_list[i])):
temp = ['grouped_entry_list[i][j].{}'.format(k) for k in disassemble_key]
for k in range(len(temp)):
temp[k] = eval(temp[k])
grouped_entry_list[i][j] = temp
# Return
return grouped_entry_list
def get_scores(self, include_unscored: bool=False) -> list:
""" Get anime/manga scores """
return [i.my_score for i in self.get_list(include_unscored=include_unscored)]
def get_summed_scores(self, include_unscored: bool=False) -> list:
""" Get summed anime/manga scores """
return [
self.get_scores(include_unscored=include_unscored).count(i)
for i in range(1 - include_unscored, 11)
]
def get_grouped_scores(
self,
include_unscored: bool=False,
group_by: str='series_type',
sort_method: str='most_common',
sort_order: str='descending',
manual_sort: bool=None
) -> dict:
""" Get grouped anime/manga scores """
grouped_entry_list = self.get_grouped_list(
include_unscored=False,
group_by=group_by,
sort_method=sort_method,
sort_order=sort_order,
manual_sort=manual_sort
)
for i in grouped_entry_list:
for j in range(len(grouped_entry_list[i])):
grouped_entry_list[i][j] = grouped_entry_list[i][j].my_score
return grouped_entry_list
def get_summed_grouped_scores(
self,
include_unscored: bool=False,
group_by: str='series_type',
sort_method: str='most_common',
sort_order: str='descending',
manual_sort: bool=None
) -> dict:
""" Get summed grouped anime/manga scores """
scores = self.get_grouped_scores(
include_unscored=include_unscored,
group_by=group_by,
sort_method=sort_method,
sort_order=sort_order,
manual_sort=manual_sort
)
for i in scores:
scores[i] = [scores[i].count(j) for j in range(1 - include_unscored, 11)]
return scores
def get_min(self) -> int:
""" Get a minimum of the anime/manga list scores """
return min(self.get_scores())
def get_max(self) -> int:
""" Get a maximum of the anime/manga list scores """
return max(self.get_scores())
def get_average(self) -> float:
""" Get an average of the anime/manga list scores """
scores = self.get_scores()
return sum(scores) / len(scores)
def get_median(self) -> int:
""" Get a median of the anime/manga list scores """
scores = sorted(self.get_scores())
if len(scores) % 2 == 0:
return (scores[len(scores) // 2 - 1] + scores[len(scores) // 2]) / 2
return scores[len(scores) // 2]
def get_mode(self) -> int:
""" Get a mode of the anime/manga list scores """
        # The most frequent score: index of the largest count in the summed scores,
        # shifted by one because counts start at score 1.
        summed_scores = self.get_summed_scores()
        return summed_scores.index(max(summed_scores)) + 1
def get_sd(self) -> float:
""" Get a standard deviation of the anime/manga list scores """
scores = self.get_scores()
return sqrt(sum([(i - self.get_average()) ** 2 for i in scores]) / len(scores))
def get_partial(self, percentage: float, part: str='top', rounding_method: str='roundx', include_unscored: bool=False) -> list:
""" Get partial anime/manga list """
# Anime/manga List Initiation
entry_list = self.get_list(include_unscored=include_unscored)
entry_list.sort(key=lambda i: i.my_score, reverse=True)
# Anime/manga Count Calculation
entry_count = percentage / 100 * len(entry_list)
# Anime/manga Count Rounding Method
if rounding_method == 'floor':
entry_count = floor(entry_count)
elif rounding_method == 'ceil':
entry_count = ceil(entry_count)
elif rounding_method == 'round':
entry_count = round(entry_count)
elif rounding_method == 'roundx':
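            # 'roundx': exact multiples of 0.5 (half counts) are always floored,
            # e.g. 3.5 -> 3; any other value falls through to plain round().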
if entry_count % 0.5 == 0:
entry_count = floor(entry_count)
else:
entry_count = round(entry_count)
else:
error('Invalid rounding_method `{}` of get_partial().'.format(rounding_method))
return None
# Anime/manga List Slicing
if part == 'top':
return entry_list[:entry_count]
elif part == 'bottom':
entry_list.reverse()
return entry_list[:entry_count]
elif part == 'middle':
middle = len(entry_list)//2
upper = middle + floor(entry_count/2)
lower = middle - ceil(entry_count/2)
return entry_list[lower:upper]
else:
error('Invalid part `{}` of get_partial().'.format(part))
return None
def get_partial_average(self, percentage: float, part: str='top', rounding_method: str='roundx', include_unscored:bool=False) -> float:
""" Get partial anime/manga list average """
entry_list = self.get_partial(
percentage=percentage,
part=part,
rounding_method=rounding_method,
include_unscored=include_unscored
)
scores = [i.my_score for i in entry_list]
return sum(scores)/len(scores)
| [
"[email protected]"
] | |
6476a4bce704d3c37f77b64321d4174453e2cdc5 | 5d069aa71e5cd242b4f1f29541dc85107822cbda | /mc_states/modules/mc_www.py | 319be1cb2d7aa0ca54f2980cd05641837f59532c | [
"BSD-3-Clause"
] | permissive | h4ck3rm1k3/makina-states | f22a5e3a0dde9d545108b4c14279451198682370 | 3f2dbe44867f286b5dea81b9752fc8ee332f3929 | refs/heads/master | 2020-02-26T13:17:09.895814 | 2015-01-12T21:45:02 | 2015-01-12T21:45:02 | 29,172,493 | 0 | 0 | null | 2015-01-13T04:23:09 | 2015-01-13T04:23:07 | null | UTF-8 | Python | false | false | 1,230 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
.. _module_mc_www:
mc_www / www registry
============================================
If you alter this module and want to test it, do not forget
to deploy it on minion using::
salt '*' saltutil.sync_modules
Documentation of this module is available with::
salt '*' sys.doc mc_www
'''
# Import python libs
import logging
import mc_states.utils
__name = 'www'
log = logging.getLogger(__name__)
def settings():
'''
www registry
fastcgi_socket_directory
fastcgi socket directory
'''
@mc_states.utils.lazy_subregistry_get(__salt__, __name)
def _settings():
grains = __grains__
pillar = __pillar__
locations = __salt__['mc_locations.settings']()
wwwData = __salt__['mc_utils.defaults'](
'makina-states.services.http.www', {
'doc_root': '/var/www/default',
'serveradmin_mail': 'webmaster@localhost',
'socket_directory': '/var/spool/www',
'upload_max_filesize': '25000000M',
})
return wwwData
return _settings()
def dump():
return mc_states.utils.dump(__salt__,__name)
# vim:set et sts=4 ts=4 tw=80:
| [
"[email protected]"
] | |
35df2d756e2076153f711a103a0783d15316232b | ce29884aa23fbb74a779145046d3441c619b6a3c | /all/217.py | 280394e1b4abf3d78e10998357c12d53a005a8c9 | [] | no_license | gebijiaxiaowang/leetcode | 6a4f1e3f5f25cc78a5880af52d62373f39a546e7 | 38eec6f07fdc16658372490cd8c68dcb3d88a77f | refs/heads/master | 2023-04-21T06:16:37.353787 | 2021-05-11T12:41:21 | 2021-05-11T12:41:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2020/12/1 22:48
# @Author : dly
# @File : 217.py
# @Desc :
class Solution(object):
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if len(nums) == len(set(nums)):
return False
else:
return True
| [
"[email protected]"
] | |
a7e95404a37e153e6cdf62771caf3112de8d8ed3 | 08a3833ec97e33c4a40bf1d1aa403da7836e0df0 | /demo/urls.py | d4a707b0083f84bce11751e79741ea699a02c9cf | [] | no_license | srikanthpragada/PYTHON_02_AUG_2018_WEBDEMO | ae667e154abcc05acaaf0d18d45be4ebc995c6cc | e34019102f5c8159beef35a2e3665028aea509ce | refs/heads/master | 2020-03-28T06:10:43.303938 | 2018-09-27T12:33:35 | 2018-09-27T12:33:35 | 147,818,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | from django.contrib import admin
from django.urls import path
from . import views
from . import dept_views
from . import book_views
from . import ajax_views
from . import rest_views
from . import class_views
urlpatterns = [
path('product/', views.show_product),
path('products/', views.show_products_list),
path('netprice/', views.net_price_calculator),
path('netprice2/', views.net_price),
path('netprice3/', views.net_price_with_form),
path('add_dept/', dept_views.add_dept),
path('list_depts/', dept_views.list_depts),
path('book_home/', book_views.book_home),
path('add_book/', book_views.add_book),
path('list_books/', book_views.list_books),
path('search_books/', book_views.search_books),
path('ajax/', ajax_views.ajax_demo),
path('ajax_now/', ajax_views.now),
path('ajax_get_title/', ajax_views.get_title),
path('ajax_get_book/', ajax_views.get_book),
path('list_langs/', views.list_langs),
path('about/', class_views.AboutView.as_view()),
path('books/', class_views.BooksList.as_view()),
path('api/books/', rest_views.list_books),
path('api/books/<int:bookid>', rest_views.process_book),
path('rest_client/', rest_views.client),
]
| [
"[email protected]"
] | |
c8f45e7bb4f13bb13d66ece65007b8bf2f16e966 | c7f7600a703cb5a7a7cab8c0d2a03f125d8208c8 | /spektral/layers/pooling.py | f4ee6dd6a66d634a3e6f9858f8a6b6efec6b189d | [
"MIT"
] | permissive | XIAOLONG-YUN/spektral | 72a0a9bef576ecec6347e720c5651c691b287f7d | a3883117b16b958e2e24723afc6885fbc7df397a | refs/heads/master | 2020-05-31T17:34:51.764888 | 2019-05-31T10:10:41 | 2019-05-31T10:10:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,887 | py | from keras import backend as K
from keras import regularizers, constraints, initializers
from keras.backend import tf
from keras.engine import Layer
################################################################################
# Global pooling layers
################################################################################
class GlobalSumPool(Layer):
"""
A global sum pooling layer. Pools a graph by computing the sum of its node
features.
**Mode**: single, mixed, batch, graph batch.
**Input**
- node features of shape `(n_nodes, n_features)` (with optional `batch`
dimension);
- (optional) graph IDs of shape `(n_nodes, )` (graph batch mode);
**Output**
- tensor like node features, but without node dimension (except for single
mode, where the node dimension is preserved and set to 1).
**Arguments**
None.
"""
def __init__(self, **kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(GlobalSumPool, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
if isinstance(input_shape, list) and len(input_shape) == 2:
self.data_mode = 'graph'
else:
if len(input_shape) == 2:
self.data_mode = 'single'
else:
self.data_mode = 'batch'
super(GlobalSumPool, self).build(input_shape)
def call(self, inputs):
if self.data_mode == 'graph':
X = inputs[0]
I = inputs[1]
else:
X = inputs
if self.data_mode == 'graph':
return tf.segment_sum(X, I)
else:
return K.sum(X, axis=-2, keepdims=(self.data_mode == 'single'))
def compute_output_shape(self, input_shape):
if self.data_mode == 'single':
return (1, ) + input_shape[-1:]
elif self.data_mode == 'batch':
return input_shape[:-2] + input_shape[-1:]
else:
return input_shape[0] # Input shape is a list of shapes for X and I
def get_config(self):
return super(GlobalSumPool, self).get_config()
class GlobalAvgPool(Layer):
"""
An average pooling layer. Pools a graph by computing the average of its node
features.
**Mode**: single, mixed, batch, graph batch.
**Input**
- node features of shape `(n_nodes, n_features)` (with optional `batch`
dimension);
- (optional) graph IDs of shape `(n_nodes, )` (graph batch mode);
**Output**
- tensor like node features, but without node dimension (except for single
mode, where the node dimension is preserved and set to 1).
**Arguments**
None.
"""
def __init__(self, **kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(GlobalAvgPool, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
if isinstance(input_shape, list) and len(input_shape) == 2:
self.data_mode = 'graph'
else:
if len(input_shape) == 2:
self.data_mode = 'single'
else:
self.data_mode = 'batch'
super(GlobalAvgPool, self).build(input_shape)
def call(self, inputs):
if self.data_mode == 'graph':
X = inputs[0]
I = inputs[1]
else:
X = inputs
if self.data_mode == 'graph':
return tf.segment_mean(X, I)
else:
return K.mean(X, axis=-2, keepdims=(self.data_mode == 'single'))
def compute_output_shape(self, input_shape):
if self.data_mode == 'single':
return (1, ) + input_shape[-1:]
elif self.data_mode == 'batch':
return input_shape[:-2] + input_shape[-1:]
else:
return input_shape[0]
def get_config(self):
return super(GlobalAvgPool, self).get_config()
class GlobalMaxPool(Layer):
"""
A max pooling layer. Pools a graph by computing the maximum of its node
features.
**Mode**: single, mixed, batch, graph batch.
**Input**
- node features of shape `(n_nodes, n_features)` (with optional `batch`
dimension);
- (optional) graph IDs of shape `(n_nodes, )` (graph batch mode);
**Output**
- tensor like node features, but without node dimension (except for single
mode, where the node dimension is preserved and set to 1).
**Arguments**
None.
"""
def __init__(self, **kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(GlobalMaxPool, self).__init__(**kwargs)
self.supports_masking = True
def build(self, input_shape):
if isinstance(input_shape, list) and len(input_shape) == 2:
self.data_mode = 'graph'
else:
if len(input_shape) == 2:
self.data_mode = 'single'
else:
self.data_mode = 'batch'
super(GlobalMaxPool, self).build(input_shape)
def call(self, inputs):
if self.data_mode == 'graph':
X = inputs[0]
I = inputs[1]
else:
X = inputs
if self.data_mode == 'graph':
return tf.segment_max(X, I)
else:
return K.max(X, axis=-2, keepdims=(self.data_mode == 'single'))
def compute_output_shape(self, input_shape):
if self.data_mode == 'single':
return (1, ) + input_shape[-1:]
elif self.data_mode == 'batch':
return input_shape[:-2] + input_shape[-1:]
else:
return input_shape[0]
def get_config(self):
return super(GlobalMaxPool, self).get_config()
class GlobalAttentionPool(Layer):
"""
A gated attention global pooling layer as presented by
[Li et al. (2017)](https://arxiv.org/abs/1511.05493).
**Mode**: single, mixed, batch, graph batch.
**Input**
- node features of shape `(n_nodes, n_features)` (with optional `batch`
dimension);
- (optional) graph IDs of shape `(n_nodes, )` (graph batch mode);
**Output**
- tensor like node features, but without node dimension (except for single
mode, where the node dimension is preserved and set to 1), and last
dimension changed to `channels`.
**Arguments**
- `channels`: integer, number of output channels;
- `bias_initializer`: initializer for the bias vector;
- `kernel_regularizer`: regularization applied to the kernel matrix;
- `bias_regularizer`: regularization applied to the bias vector;
- `activity_regularizer`: regularization applied to the output;
- `kernel_constraint`: constraint applied to the kernel matrix;
- `bias_constraint`: constraint applied to the bias vector.
"""
def __init__(self, channels=32,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(GlobalAttentionPool, self).__init__(**kwargs)
self.channels = channels
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
def build(self, input_shape):
assert len(input_shape) >= 2
if isinstance(input_shape, list) and len(input_shape) == 2:
self.data_mode = 'graph'
F = input_shape[0][-1]
else:
if len(input_shape) == 2:
self.data_mode = 'single'
else:
self.data_mode = 'batch'
F = input_shape[-1]
self.lg_kernel = self.add_weight(shape=(F, self.channels),
name='LG_kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.lg_bias = self.add_weight(shape=(self.channels, ),
name='LG_bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.attn_kernel = self.add_weight(shape=(F, self.channels),
name='attn_kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.attn_bias = self.add_weight(shape=(self.channels,),
name='attn_bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.built = True
def call(self, inputs):
if self.data_mode == 'graph':
X, I = inputs
else:
X = inputs
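        # Gated attention pooling: a sigmoid gate computed from the node features
        # scales a linear projection of those features before summing over nodes.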
inputs_linear = K.dot(X, self.lg_kernel) + self.lg_bias
attn_map = K.dot(X, self.attn_kernel) + self.attn_bias
attn_map = K.sigmoid(attn_map)
masked_inputs = inputs_linear * attn_map
if self.data_mode in {'single', 'batch'}:
output = K.sum(masked_inputs, axis=-2, keepdims=self.data_mode=='single')
else:
output = tf.segment_sum(masked_inputs, I)
return output
def compute_output_shape(self, input_shape):
if self.data_mode == 'single':
return (1,) + (self.channels, )
elif self.data_mode == 'batch':
return input_shape[:-2] + (self.channels, )
else:
# Input shape is a list of shapes for X and I
output_shape = input_shape[0]
output_shape = output_shape[:-1] + (self.channels, )
return output_shape
def get_config(self):
config = {}
base_config = super(GlobalAttentionPool, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class GlobalAttnSumPool(Layer):
"""
A node-attention global pooling layer. Pools a graph by learning attention
coefficients to sum node features.
**Mode**: single, mixed, batch, graph batch.
**Input**
- node features of shape `(n_nodes, n_features)` (with optional `batch`
dimension);
- (optional) graph IDs of shape `(n_nodes, )` (graph batch mode);
**Output**
- tensor like node features, but without node dimension (except for single
mode, where the node dimension is preserved and set to 1).
**Arguments**
- `attn_kernel_initializer`: initializer for the attention kernel matrix;
- `kernel_regularizer`: regularization applied to the kernel matrix;
- `attn_kernel_regularizer`: regularization applied to the attention kernel
matrix;
- `attn_kernel_constraint`: constraint applied to the attention kernel
matrix;
"""
def __init__(self,
attn_kernel_initializer='glorot_uniform',
kernel_regularizer=None,
attn_kernel_regularizer=None,
attn_kernel_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(GlobalAttnSumPool, self).__init__(**kwargs)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.attn_kernel_initializer = initializers.get(attn_kernel_initializer)
self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)
self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)
def build(self, input_shape):
assert len(input_shape) >= 2
if isinstance(input_shape, list) and len(input_shape) == 2:
self.data_mode = 'graph'
F = input_shape[0][-1]
else:
if len(input_shape) == 2:
self.data_mode = 'single'
else:
self.data_mode = 'batch'
F = input_shape[-1]
# Attention kernels
self.attn_kernel = self.add_weight(shape=(F, 1),
initializer=self.attn_kernel_initializer,
regularizer=self.attn_kernel_regularizer,
constraint=self.attn_kernel_constraint,
name='attn_kernel')
self.built = True
def call(self, inputs):
if self.data_mode == 'graph':
X, I = inputs
else:
X = inputs
attn_coeff = K.dot(X, self.attn_kernel)
attn_coeff = K.squeeze(attn_coeff, -1)
attn_coeff = K.softmax(attn_coeff)
if self.data_mode == 'single':
output = K.dot(attn_coeff[None, ...], X)
elif self.data_mode == 'batch':
output = K.batch_dot(attn_coeff, X)
else:
output = attn_coeff[:, None] * X
output = tf.segment_sum(output, I)
return output
def compute_output_shape(self, input_shape):
if self.data_mode == 'single':
return (1,) + input_shape[-1:]
elif self.data_mode == 'batch':
return input_shape[:-2] + input_shape[-1:]
else:
return input_shape[0]
def get_config(self):
config = {}
base_config = super(GlobalAttnSumPool, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| [
"[email protected]"
] | |
30675a30aad98c9bf2aec0cb940be25c5c0449d1 | 2cde0b63638ac8bad6a2470a4ec1cbbdae8f7d39 | /percentilesandquartiles17.py | 9066c301ed8954ed980881cb9437211cbe66d357 | [] | no_license | PhuongThuong/baitap | cd17b2f02afdbebfe00d0c33c67791ade4fc61ff | 012c609aa99ba587c492b8e6096e15374dc905f2 | refs/heads/master | 2023-08-22T21:39:57.527349 | 2021-10-09T10:54:48 | 2021-10-09T10:54:48 | 415,281,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | import numpy as np
scores = np.array([8, 6, 4, 3, 9, 4, 7, 4, 4, 9, 7, 3, 9, 4, 2, 3, 8, 5, 9, 6])
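# The interpolation argument controls how np.percentile picks a value when the
# requested percentile falls between two data points (lower/higher/linear/nearest/midpoint).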
print("Bách phân vị thứ 70: ", np.percentile(scores, 70, interpolation='lower'))
print("Bách phân vị thứ 70: ", np.percentile(scores, 70, interpolation='higher'))
print("Bách phân vị thứ 70: ", np.percentile(scores, 70, interpolation='linear'))
print("Bách phân vị thứ 70: ", np.percentile(scores, 70, interpolation='nearest'))
print("Bách phân vị thứ 70: ", np.percentile(scores, 70, interpolation='midpoint'))
print("Bách phân vị thứ 50: ", np.percentile(scores, 50))
print("Median = ", np.median(scores))
print("Q1 = : ", np.quantile(scores, 0.25))
print("Q2 = : ", np.quantile(scores, 0.5))
print("Q3 = : ", np.quantile(scores, 0.75)) | [
"[email protected]"
] |