blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3–616) | content_id (string, len 40) | detected_licenses (sequence, len 0–112) | license_type (string, 2 classes) | repo_name (string, len 5–115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k–681M, nullable ⌀) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable ⌀) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3–10.2M) | extension (string, 188 classes) | content (string, len 3–10.2M) | authors (sequence, len 1) | author_id (string, len 1–132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
84e7d9a493e30b3985e39ae5126bbaeeca00239c | 82c73b70c2002f647bdc254125f0bdb18f0b79d2 | /openstack_dashboard/dashboards/admin/license/forms.py | 099ccaf8c180726a41000b7ec3cdcf950aee7eaa | [
"Apache-2.0"
] | permissive | xuweiliang/Codelibrary | cfb5755ced54c65cacdb3e35ab2b98385f8d5f8e | 54e45b2daa205132c05b0ff5a2c3db7fca2853a7 | refs/heads/master | 2021-05-04T00:31:42.025238 | 2018-03-20T07:05:20 | 2018-03-20T07:05:20 | 71,852,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,302 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import pytz
import random, string
import re
import sys
import operator
from django.core.urlresolvers import reverse
from openstack_dashboard import api
from django import shortcuts
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import forms
from horizon import messages
from datetime import datetime as time
from horizon import exceptions
from horizon.utils import validators
from django.utils import encoding
from subprocess import PIPE,Popen
from django.http import HttpResponseRedirect
from openstack_dashboard import record_action
LOG = logging.getLogger(__name__)
class LicenseRegisterForm(forms.SelfHandlingForm):
licence_help=_("Please enter the serial number")
cryptogram_help=_("If you need a new certificate,\
please send your service provider the Cryptogram")
encrypted_license = forms.CharField(widget=forms.widgets.Textarea,
label=_("Input licence"),
help_text = licence_help,
required=True)
system_uuid = forms.CharField(label=_("Cryptogram"),
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
help_text = cryptogram_help)
def __init__(self, request, *args, **kwargs):
super(LicenseRegisterForm, self).__init__(request, *args, **kwargs)
def handle(self, request, data):
try:
licence = data.get('encrypted_license', None).strip()
systemUUID = data.get('system_uuid')
UUID, key = systemUUID.split(":")
decoded_string = eval(api.authcode.AuthCode.decode(licence, UUID.strip()))
during = decoded_string.get('during', None)
num = decoded_string.get('num', None)
authcode = decoded_string.get('uuid')
uuid, pwd = authcode.split(":")
licenceTime = decoded_string.get('time', None)
if uuid != UUID:
messages.error(request,
encoding.force_unicode(_("Serial number can only activate\
the specified server")))
api.nova.systemlogs_create(request, '-',\
record_action.REGISTERLICENSE,
result=False, detail=_("Licence Register Fail"))
return HttpResponseRedirect('/dashboard/admin/license')
date = time.strptime(licenceTime, '%Y-%m-%dT%H:%M:%S.%f')
starttime = time.strftime(date, '%Y-%m-%d %H:%M:%S')
apartDays =(time.now()- date).days
if during > 0 and apartDays < 3 and num > 0:
kwargs={'licence':{'starttime':starttime,
'system_uuid':authcode,
'encrypted_license':licence,
'disabled':False}}
try:
api.nova.update_licence(request, **kwargs)
msg = _("Serial number authentication success")
messages.success(request,
encoding.force_unicode(msg))
api.nova.systemlogs_create(request, '-',\
record_action.REGISTERLICENSE,
result=True, detail=msg)
return True
except Exception as e:
exceptions.handle(request,
encoding.force_unicode(_("%s") % e))
api.nova.systemlogs_create(request, '-',\
record_action.REGISTERLICENSE,
result=False, detail=_("Licence Register Fail"))
return False
else:
messages.error(request,
encoding.force_unicode(_("Serial number expired or invalid")))
api.nova.systemlogs_create(request, '-',\
record_action.REGISTERLICENSE,
result=False, detail=_("Licence invalid"))
return HttpResponseRedirect('/dashboard/admin/license')
except Exception as e:
exceptions.handle(request,
encoding.force_unicode(_("Invalid serial number \
or registration error %s" % e)))
api.nova.systemlogs_create(request, '-',\
record_action.REGISTERLICENSE,
result=False, detail=_("Licence Register Fail"))
return False
| [
"[email protected]"
] | |
b5b156cdd0e5a59512ec09150e8dfd07ed2350af | 3ba8bf9fb1a3d54233e927893bc2d3865692c896 | /ophyd/controls/ophydobj.py | c94c640ca165b825b3e5f70be5d377a4f711108f | [] | no_license | NSLS-II-CSX/ophyd | 68fee01393d819350270b143457f76a4a5ccf703 | aadf6197f7a3b1ba907e48a73d5af8b07b7c57ad | refs/heads/master | 2021-01-17T21:19:16.867892 | 2014-12-13T23:39:40 | 2014-12-13T23:39:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,022 | py | # vi: ts=4 sw=4
'''
:mod:`ophyd.control.ophydobj` - Base object type
================================================
.. module:: ophyd.control.ophydobj
:synopsis:
'''
from __future__ import print_function
from ..session import register_object
class OphydObject(object):
_default_sub = None
def __init__(self, name, alias, register=True):
'''
Subscription/callback mechanism for registered objects in ophyd sessions.
'''
self._name = name
self._alias = alias
self._subs = dict((getattr(self, sub), []) for sub in dir(self)
if sub.startswith('SUB_'))
self._sub_cache = {}
self._ses_logger = None
if register:
self._register()
def _run_sub(self, cb, *args, **kwargs):
'''
Run a single subscription callback
:param cb: The callback
'''
try:
cb(*args, **kwargs)
except Exception as ex:
sub_type = kwargs['sub_type']
self._ses_logger.error('Subscription %s callback exception (%s)' %
(sub_type, self), exc_info=ex)
def _run_cached_sub(self, sub_type, cb):
'''
Run a single subscription callback using the most recent
cached arguments
:param sub_type: The subscription type
:param cb: The callback
'''
try:
args, kwargs = self._sub_cache[sub_type]
except KeyError:
pass
else:
# Cached kwargs includes sub_type
self._run_sub(cb, *args, **kwargs)
def _run_subs(self, *args, **kwargs):
'''
Run a set of subscription callbacks
Only the kwarg :param:`sub_type` is required, indicating
the type of callback to perform. All other positional arguments
and kwargs are passed directly to the callback function.
No exceptions are raised when the callback functions fail;
they are merely logged with the session logger.
'''
sub_type = kwargs['sub_type']
# Shallow-copy the callback arguments for replaying the
# callback at a later time (e.g., when a new subscription is made)
self._sub_cache[sub_type] = (tuple(args), dict(kwargs))
for cb in self._subs[sub_type]:
self._run_sub(cb, *args, **kwargs)
def subscribe(self, cb, event_type=None, run=True):
'''
Subscribe to events this signal group emits
See also :func:`clear_sub`
:param callable cb: A callable function (that takes kwargs)
to be run when the event is generated
:param event_type: The name of the event to subscribe to (if None,
defaults to SignalGroup._default_sub)
:type event_type: str or None
:param bool run: Run the callback now
'''
if event_type is None:
event_type = self._default_sub
self._subs[event_type].append(cb)
if run:
self._run_cached_sub(event_type, cb)
def clear_sub(self, cb, event_type=None):
'''
Remove a subscription, given the original callback function
See also :func:`subscribe`
:param callable callback: The callback
:param event_type: The event to unsubscribe from (if None, removes it
from all event types)
:type event_type: str or None
'''
if event_type is None:
for event_type, cbs in self._subs.items():
try:
cbs.remove(cb)
except ValueError:
pass
else:
self._subs[event_type].remove(cb)
def _register(self):
'''
Register this object with the session
'''
register_object(self)
@property
def name(self):
return self._name
@property
def alias(self):
'''
An alternative name for the signal
'''
return self._alias
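# --- usage sketch (illustrative addition, not part of the original API) ----
# A minimal example of the subscription mechanism above: a hypothetical
# subclass declares a SUB_* event type, emits it with _run_subs(), and
# callers attach/detach callbacks with subscribe()/clear_sub(). The names
# _ExampleSignal and _print_value are assumptions for illustration only.
class _ExampleSignal(OphydObject):
    SUB_VALUE = 'value'
    _default_sub = SUB_VALUE

    def put(self, value):
        # notify every subscriber of the new value
        self._run_subs(sub_type=self.SUB_VALUE, value=value)


def _example_subscription_usage():
    def _print_value(value=None, **kwargs):
        print('new value:', value)

    sig = _ExampleSignal('example', 'ex', register=False)
    sig.subscribe(_print_value)   # defaults to SUB_VALUE
    sig.put(3.14)                 # _print_value is invoked
    sig.clear_sub(_print_value)   # detach the callback again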
| [
"[email protected]"
] | |
a2c709a82b9f707add011e641eedf079ded0b67e | 95f9c734c4bf5de8e5d0adff9ac2cf0228df75ac | /Django/mysite-bak/mysite/polls/views.py | da0e946bc8b3ef8aea7fb16256eecbdb21caf692 | [] | no_license | holen/Python | 7a996b13ff2224084397223879c380169d47ff8c | 506fff291d6e9c6f80c30a51cc3b77e9dd048468 | refs/heads/master | 2022-12-12T22:12:51.561716 | 2019-10-16T03:08:00 | 2019-10-16T03:08:00 | 14,278,665 | 1 | 0 | null | 2022-12-08T00:51:26 | 2013-11-10T15:29:59 | Python | UTF-8 | Python | false | false | 2,090 | py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from polls.models import Question, Choice
from django.template import RequestContext, loader
from django.http import Http404
from django.shortcuts import get_object_or_404, render
from django.core.urlresolvers import reverse
# Create your views here.
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {'latest_question_list': latest_question_list}
return render(request, 'polls/index.html', context)
# template = loader.get_template('polls/index.html')
#context = RequestContext(request, {
# 'latest_question_list': latest_question_list,
# })
#return HttpResponse(template.render(context))
#output = ','.join([p.question_text for p in latest_question_list])
#return HttpResponse(output)
#return HttpResponse("Hello, world. You're at the polls index.")
def detail(request, question_id):
#try:
# question = Question.objects.get(pk=question_id)
# except Question.DoesNotExist:
# raise Http404
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/detail.html', {'question':question})
#return HttpResponse("You're looking at question %s." % question_id)
def results(request, question_id):
#response = "You're looking at the results of question %s."
#return HttpResponse(response % question_id)
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/results.html', {'question': question})
def vote(request, question_id):
p = get_object_or_404(Question, pk=question_id)
try:
selected_choice = p.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'polls/detail.html', {
'question': p,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))
#return HttpResponse("You're voting on question %s." % question_id)
| [
"[email protected]"
] | |
7fa86d8b542bd91ae527f605694388c5e6e58d35 | 5608a9cd3bec8cab1c3f9d7f42896107b78593cc | /tests/unit/mock_docker/fake_stat.py | b85bc3b8007c8c020d9650a655a3cbcd34fdafe7 | [
"Apache-2.0"
] | permissive | troyready/runway | cdee6d94f42173c8aa0bd414620b68be36a510aa | 4fd299961a4b73df39e14f4f19a7236f7be17dd8 | refs/heads/master | 2021-06-18T16:05:30.712211 | 2021-01-14T01:44:32 | 2021-01-14T01:44:32 | 151,314,626 | 0 | 0 | Apache-2.0 | 2018-10-02T19:55:09 | 2018-10-02T19:55:08 | null | UTF-8 | Python | false | false | 2,990 | py | """Stats for fake Docker API."""
OBJ = {
"read": "2015-02-11T19:20:46.667237763+02:00",
"network": {
"rx_bytes": 567224,
"rx_packets": 3773,
"rx_errors": 0,
"rx_dropped": 0,
"tx_bytes": 1176,
"tx_packets": 13,
"tx_errors": 0,
"tx_dropped": 0,
},
"cpu_stats": {
"cpu_usage": {
"total_usage": 157260874053,
"percpu_usage": [52196306950, 24118413549, 53292684398, 27653469156],
"usage_in_kernelmode": 37140000000,
"usage_in_usermode": 62140000000,
},
"system_cpu_usage": 3.0881377e14,
"throttling_data": {"periods": 0, "throttled_periods": 0, "throttled_time": 0},
},
"memory_stats": {
"usage": 179314688,
"max_usage": 258166784,
"stats": {
"active_anon": 90804224,
"active_file": 2195456,
"cache": 3096576,
"hierarchical_memory_limit": 1.844674407371e19,
"inactive_anon": 85516288,
"inactive_file": 798720,
"mapped_file": 2646016,
"pgfault": 101034,
"pgmajfault": 1207,
"pgpgin": 115814,
"pgpgout": 75613,
"rss": 176218112,
"rss_huge": 12582912,
"total_active_anon": 90804224,
"total_active_file": 2195456,
"total_cache": 3096576,
"total_inactive_anon": 85516288,
"total_inactive_file": 798720,
"total_mapped_file": 2646016,
"total_pgfault": 101034,
"total_pgmajfault": 1207,
"total_pgpgin": 115814,
"total_pgpgout": 75613,
"total_rss": 176218112,
"total_rss_huge": 12582912,
"total_unevictable": 0,
"total_writeback": 0,
"unevictable": 0,
"writeback": 0,
},
"failcnt": 0,
"limit": 8039038976,
},
"blkio_stats": {
"io_service_bytes_recursive": [
{"major": 8, "minor": 0, "op": "Read", "value": 72843264},
{"major": 8, "minor": 0, "op": "Write", "value": 4096},
{"major": 8, "minor": 0, "op": "Sync", "value": 4096},
{"major": 8, "minor": 0, "op": "Async", "value": 72843264},
{"major": 8, "minor": 0, "op": "Total", "value": 72847360},
],
"io_serviced_recursive": [
{"major": 8, "minor": 0, "op": "Read", "value": 10581},
{"major": 8, "minor": 0, "op": "Write", "value": 1},
{"major": 8, "minor": 0, "op": "Sync", "value": 1},
{"major": 8, "minor": 0, "op": "Async", "value": 10581},
{"major": 8, "minor": 0, "op": "Total", "value": 10582},
],
"io_queue_recursive": [],
"io_service_time_recursive": [],
"io_wait_time_recursive": [],
"io_merged_recursive": [],
"io_time_recursive": [],
"sectors_recursive": [],
},
}
| [
"[email protected]"
] | |
b4e60d4747db4b3823750c69fecee8522989f43d | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/ControlRegions/SS/2016HIPM_v9/variables.py | b38df10fccc279186d7b3cf5a9b34a05e48c2e2e | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 3,120 | py | # variables
# 0 = not fold (default), 1 = fold underflowbin, 2 = fold overflow bin, 3 = fold underflow and overflow
#variables = {}
#variables['nvtx'] = { 'name': 'PV_npvsGood',
# 'range' : (20,0,100),
# 'xaxis' : 'nvtx',
# 'fold' : 3
# }
variables['mllpeak'] = {
'name': 'mll',
'range' : (20,80,100),
'xaxis' : 'm_{ll} [GeV]',
'fold' : 0
}
variables['ptll'] = {
'name': 'ptll',
'range' : (20, 0,200),
'xaxis' : 'p_{T}^{ll} [GeV]',
'fold' : 0
}
variables['ptll_more'] = {
'name': 'ptll',
'range' : (50, 0,100),
'xaxis' : 'p_{T}^{ll} [GeV]',
'fold' : 0
}
variables['pt1'] = {
'name': 'Lepton_pt[0]',
'range' : (20,0,200),
'xaxis' : 'p_{T} 1st lep',
'fold' : 3
}
variables['pt1_v7'] = {
'name': 'Lepton_pt[0]',
'range' : (20,20,100),
'xaxis' : 'p_{T} 1st lep',
'fold' : 3
}
variables['pt2'] = {
'name': 'Lepton_pt[1]',
'range' : (20,0,100),
'xaxis' : 'p_{T} 2nd lep',
'fold' : 3
}
variables['eta1'] = {
'name': 'Lepton_eta[0]',
'range' : (20,-3,3),
'xaxis' : '#eta 1st lep',
'fold' : 3
}
variables['eta2'] = {
'name': 'Lepton_eta[1]',
'range' : (20,-3,3),
'xaxis' : '#eta 2nd lep',
'fold' : 3
}
variables['phi1'] = {
'name': 'Lepton_phi[0]',
'range' : (20,-3.2,3.2),
'xaxis' : '#phi 1st lep',
'fold' : 3
}
variables['phi2'] = {
'name': 'Lepton_phi[1]',
'range' : (20,-3.2,3.2),
'xaxis' : '#phi 2nd lep',
'fold' : 3
}
variables['puppimet'] = {
'name': 'PuppiMET_pt',
'range' : (20,0,100),
'xaxis' : 'puppimet [GeV]',
'fold' : 3
}
variables['njet'] = {
'name': 'Sum$(CleanJet_pt>30)',
'range' : (5,0,5),
'xaxis' : 'Number of jets',
'fold' : 2
}
variables['jetpt1'] = {
'name': '(Sum$(CleanJet_pt>30)>0)*(Alt$(CleanJet_pt[0], 0)) - (Sum$(CleanJet_pt>30)==0)*99',
'range' : (20,0,200),
'xaxis' : 'p_{T} 1st jet',
'fold' : 0
}
variables['jetpt2'] = {
'name': '(Sum$(CleanJet_pt>30)>1)*(Alt$(CleanJet_pt[1], 0)) - (Sum$(CleanJet_pt>30)<=1)*99',
'range' : (20,0,200),
'xaxis' : 'p_{T} 2nd jet',
'fold' : 0
}
variables['jeteta1'] = {
'name': '(Sum$(CleanJet_pt>30)>0)*(Alt$(CleanJet_eta[0], 0)) - (Sum$(CleanJet_pt>30)==0)*99',
'range' : (20,-5.0,5.0),
'xaxis' : '#eta 1st jet',
'fold' : 0
}
variables['jeteta2'] = {
'name': '(Sum$(CleanJet_pt>30)>1)*(Alt$(CleanJet_eta[1], 0)) - (Sum$(CleanJet_pt>30)<=1)*99',
'range' : (20,-5.0,5.0),
'xaxis' : '#eta 2nd jet',
'fold' : 0
}
variables['trkMet'] = {
'name': 'TkMET_pt',
'range' : (20,0,200),
'xaxis' : 'trk met [GeV]',
'fold' : 3
}
variables['mpmet'] = {
'name': 'mpmet',
'range' : (20,0,200),
'xaxis' : 'min proj met [GeV]',
'fold' : 3
}
| [
"[email protected]"
] | |
ff9d10c84448a1f47b8ab9374247b004431a8c67 | 766da3ffcbd26e7f58d711f5b0e7312cb365e9fb | /layers/transformer/__init__.py | 9950eabee3288f70b34f0e30bc81b80b808266ca | [
"MIT"
] | permissive | RobertCsordas/ndr | 1277b353eb61267e023b671072730bdc7e779ca5 | da20530dfb4336deddfbe5e79d62e72d1dc2580e | refs/heads/master | 2023-09-02T22:38:57.601098 | 2021-11-19T09:52:23 | 2021-11-19T09:52:23 | 414,588,414 | 20 | 4 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | from .transformer import Transformer, AttentionMask
from .relative_transformer import RelativeTransformer
from .universal_transformer import UniversalTransformer
from .universal_relative_transformer import UniversalRelativeTransformer
from .transformer import TransformerEncoderWithLayer
from .relative_transformer import RelativeTransformerEncoderLayer
from .universal_transformer import UniversalTransformerEncoderWithLayer
from .universal_transformer import UniversalTransformerEncoder
| [
"[email protected]"
] | |
476654bc49e7272f77917b445426eaf80a1cee93 | 35efa105f00073303284103699fcaec54045a8a4 | /invoke_servers.py | d3bd5d43949a4ac06d60f0cf6a4d3fb25c6e0e22 | [] | no_license | Quantumke/mpesaapi | b12c7e663fc89d6b98170a229b7876fdb5c1541f | e0941927b194d361f443aa8cf665fc3cfce71bca | refs/heads/master | 2021-01-19T00:25:47.958078 | 2016-05-27T09:19:27 | 2016-05-27T09:19:27 | 59,822,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | import urllib.parse
import urllib.request
import ssl
def invoke_server():
url="https://safaricom.co.ke/mpesa_online/lnmo_checkout_server.php?wsdl"
values = ({
"MERCHANT_TRANSACTION_ID": "1",
"PASSWORD": "1",
"REFERENCE_ID" :'1',
"TIMESTAMP":'1461164700'
})
find_a_better_way = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
#data = urllib.parse.urlencode(values)
#data = data.encode('utf-8')
    data = urllib.parse.urlencode({'MERCHANT_TRANSACTION_ID': '1', 'PASSWORD': '10', 'REFERENCE_ID': '10', 'TIMESTAMP': '1461164700'}).encode('utf-8')
req = urllib.request.Request(url , data, headers={'User-Agent': 'Mozilla/5.0'})
resp = urllib.request.urlopen(req, context=find_a_better_way)
respData = resp.read()
print(respData)
if __name__ == "__main__":
invoke_server()
| [
"[email protected]"
] | |
e1da3255668999c3b77aa8c9332b197a9203478e | 42104a0ebfc03caf9c7648211ca6ac69de984cbb | /memedata/errors.py | 1a923ec91aa865271c3a85ad8fe34bf5b1120fc9 | [
"MIT"
] | permissive | bomdiabot/memedata-service | 61fbdf00d32fb9ef55f352aa400c8830588b8cfe | 52c772c1440901fd23bc8d5254d1f16d11d6c629 | refs/heads/dev | 2022-12-09T13:46:29.588250 | 2018-10-29T23:31:30 | 2018-10-29T23:31:30 | 147,910,641 | 2 | 0 | MIT | 2022-12-08T02:53:13 | 2018-09-08T07:09:59 | Python | UTF-8 | Python | false | false | 1,818 | py | from marshmallow import ValidationError
from werkzeug.exceptions import HTTPException
from flask_jwt_extended.exceptions import JWTExtendedException
from memedata.util import mk_errors
from memedata import config
def jwt_error_handler(error):
code = 401
messages = list(getattr(error, 'args', []))
return mk_errors(code, messages)
def http_error_handler(error):
resp = error.response
if resp is None:
code = error.code
messages = [error.description]
else:
code = getattr(resp, 'status_code', 500)
json = resp.get_json()
if 'errors' in json and json['errors']:
messages = [e['message'] for e in json['errors'] if 'message' in e]
else:
messages = [str(resp.status)]
return mk_errors(code, messages)
def validation_error_handler(error):
code = getattr(error, 'status_code', 500)
messages = getattr(error, 'messages', [])
return mk_errors(code, messages)
def generic_error_handler(error):
code = getattr(error, 'status_code', 500)
if config.debug:
messages = [str(error)]
else:
messages = ['something went wrong!']
return mk_errors(code, messages)
def error_handler(error):
try:
if isinstance(error, JWTExtendedException):
return jwt_error_handler(error)
elif isinstance(error, HTTPException):
return http_error_handler(error)
elif isinstance(error, ValidationError):
return validation_error_handler(error)
else:
return generic_error_handler(error)
except:
return mk_errors(500, 'something went wrong!')
def register_handlers(app):
app.errorhandler(Exception)(error_handler)
app.errorhandler(HTTPException)(error_handler)
app.handle_user_exception = error_handler
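# --- usage sketch (illustrative addition, not part of the original module) --
# How the handlers above are typically attached to an app; the route name
# below is an assumption for illustration only.
def _example_app():
    from flask import Flask

    app = Flask(__name__)
    register_handlers(app)

    @app.route('/boom')
    def boom():
        # any exception raised here now comes back as a JSON error list
        # built by mk_errors instead of a default HTML error page
        raise ValueError('something went wrong in the view')

    return app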
| [
"[email protected]"
] | |
31abd5ef24459b6c62af258b25f1201638b650b3 | 8e1d05d2e130e30585a08b54ce8d613ee78d6b8c | /Evaluation/modularity_analysis.py | 93c04dc25880df6da1ce71526c29416018e92e39 | [] | no_license | mturja-vf-ic-bd/AD-Longitudinal-Smoothing | 11793fafcfb039629a5a5265d98789a1e1576de2 | 10fafdd57c63cc7b5a1c0400abdbfaa4353254f3 | refs/heads/master | 2020-04-16T17:09:14.790401 | 2019-08-17T17:05:54 | 2019-08-17T17:05:54 | 165,764,720 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,380 | py | # Modularity Analysis of the raw and smooth connectomes
# The goal is to show that our method produce connectomes that has consistent modularity along time
from bct import community_louvain  # required by the community detection calls below
import numpy as np
from utils.readFile import readSubjectFiles
from plot_functions import plot_matrix_all, plot_community_structure_variation
from math import log
from utils.helper import get_subject_names
from collections import Counter
def match_list(l1, l2):
return len(set(l1).intersection(set(l2)))
def compare_community_structure(c1, c2):
matching = {}
if len(c1) < len(c2):
c1, c2 = c2, c1
for i in range(len(c1)):
max = -1
max_idx = -1
for j in range(len(c2)):
temp = match_list(c1[i], c2[j])
if temp > max:
max = temp
max_idx = j
matching[i] = max_idx
return matching
def build_longitudinal_community_structure(c_i_list):
for i, c_s in enumerate(c_i_list):
for j in range(i + 1, len(c_i_list)):
c_d = c_i_list[j]
def build_community_structure(c_i):
"""
Returns a list of list with each list representing the indices of a community
:param c_i: communitiy labels of the nodes of graph
:return: idx_ordered: nested list with each element is the indices of each community
"""
community_dict = {}
label_set = set(c_i)
for label in label_set:
idx_c = np.nonzero(c_i == label)[0]
key = min(idx_c)
community_dict[key] = idx_c
idx_ordered = []
for k in sorted(community_dict.keys()):
idx_ordered.append(list(community_dict[k]))
return idx_ordered
def sort_connectomes_by_modularity(mat, c_i=None):
"""
Sort a matrix by community structure
:param mat: adjacancy matrix (N*N)
:return: sorted adjacancy matrix (N*N)
"""
c_i, q = community_louvain(np.asarray(mat), gamma=1, ci=c_i)
com_struct = build_community_structure(c_i)
idx_ordered = []
for idx in com_struct:
idx_ordered = idx_ordered + idx
mat = mat[idx_ordered, :]
return mat[:, idx_ordered], c_i
def variation_of_information(X, Y):
n = float(sum([len(x) for x in X]))
sigma = 0.0
for x in X:
p = len(x) / n
for y in Y:
q = len(y) / n
r = len(set(x) & set(y)) / n
if r > 0.0:
sigma += r * (log(r / p, 2) + log(r / q, 2))
return abs(sigma)
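# --- worked example (illustrative addition; toy partitions, not real data) --
# Identical partitions have a variation of information of 0.0; splitting one
# community of the first partition in half gives a positive value (0.5 here).
def _voi_toy_example():
    part_a = [[0, 1, 2, 3], [4, 5, 6, 7]]
    part_b = [[0, 1], [2, 3], [4, 5, 6, 7]]
    assert variation_of_information(part_a, part_a) == 0.0
    assert abs(variation_of_information(part_a, part_b) - 0.5) < 1e-12
    return variation_of_information(part_a, part_b)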
def voi_between_community_structure(mat1, mat2, gamma=1):
"""
Measures normalized variation of information between two community structure
:param mat1: N*N adjcancy matrix of graph one.
:param mat2: N*N adjcancy matrix of graph two.
:return: nvoi: normalized variation of information between two community structure
"""
c_1, q = community_louvain(np.asarray(mat1), gamma=gamma)
c_2, q = community_louvain(np.asarray(mat2), gamma=gamma)
N = len(c_1)
X = build_community_structure(c_1)
Y = build_community_structure(c_2)
return variation_of_information(X, Y) / log(N, 2)
def mean_std_voi(sub_names):
voi_rw = []
voi_sm = []
for sub in sub_names:
connectome_list, smoothed_connectomes = readSubjectFiles(sub, method="row")
voi_rw = voi_rw + [voi_between_community_structure(v1, v2) for
v1 in connectome_list for v2 in connectome_list if v1 is not v2]
voi_sm = voi_sm + [voi_between_community_structure(v1, v2) for
v1 in smoothed_connectomes for v2 in smoothed_connectomes if v1 is not v2]
voi_rw_mean = np.mean(voi_rw)
voi_rw_std = np.std(voi_rw)
voi_sm_mean = np.mean(voi_sm)
voi_sm_std = np.std(voi_sm)
return voi_rw_mean, voi_rw_std, voi_sm_mean, voi_sm_std
if __name__ == '__main__':
sub_names = get_subject_names(3)
#sub_names = ["027_S_5110"]
print(mean_std_voi(sub_names))
'''
for sub in sub_names:
connectome_list, smoothed_connectomes = readSubjectFiles(sub, method="row")
connectome_list = [sort_connectomes_by_modularity(connectome) for connectome in connectome_list]
smoothed_connectomes = [sort_connectomes_by_modularity(connectome) for connectome in smoothed_connectomes]
plot_matrix_all(connectome_list, fname="raw_mod", savefig=True)
plot_matrix_all(smoothed_connectomes, fname="sm_mod", savefig=True)
'''
| [
"[email protected]"
] | |
7e59f965301071339cf8b02f1232ee076d1d8bc1 | d697c1d45e96bd440be9c17ab14243a5882b1f52 | /qianfeng/常用模块/Tkinter/Text控件.py | 720a8d02a87bfe968a9f57bcdad6841620b2607e | [] | no_license | ithjl521/python | 9eeda2e60dda97ee36e8764c06400eb12818689f | f4fe50799501c483cb64445fd05ee0f30f56576c | refs/heads/master | 2020-07-12T23:10:53.608276 | 2019-11-08T08:59:35 | 2019-11-08T08:59:35 | 204,931,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | import tkinter
# 创建主窗口
win = tkinter.Tk()
# 设置标题
win.title('title-hjl')
# 设置大小和位置
win.geometry("400x400+200+50")
'''
Text widget, used to display multiple lines of text
'''
text = tkinter.Text(win,width=30,height=4)
text.pack()
str = '''
【推荐】超50万C++/C#源码: 大型实时仿真组态图形源码
【活动】阿里云910会员节多款云产品满减活动火热进行中
【推荐】新手上天翼云,数十款云产品、新一代主机0元体验
【推荐】零基础轻松玩转华为云产品,获壕礼加返百元大礼
【推荐】华为云文字识别资源包重磅上市,1元万次限时抢购'''
text.insert(tkinter.INSERT,str)
# 进入消息循环
win.mainloop()
| [
"[email protected]"
] | |
9e374836569d4aa1541f6d838fc0cf6528594d4e | 70c69365a7a5b86af601fbf071f221c85abef9fc | /tensorflow/python/tpu/api.py | 540f425494a6a23291c5ec68fa93deda8c08911a | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | dyna999/tensorflow | 3ed8f243b8cc8bb30a8c96dbd30e4b27be226f83 | 163c946f6827ce9e3ffa49e56fa65ce520bf6ea5 | refs/heads/master | 2022-03-28T03:00:28.650532 | 2022-03-25T22:47:49 | 2022-03-25T22:51:26 | 97,944,926 | 0 | 0 | Apache-2.0 | 2022-01-07T22:43:29 | 2017-07-21T12:22:03 | C++ | UTF-8 | Python | false | false | 1,365 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Modules that need to be exported to the API.
List TPU modules that aren't included elsewhere here so that they can be scanned
for tf_export decorations.
"""
# pylint: disable=unused-import
from tensorflow.python.tpu import bfloat16
from tensorflow.python.tpu import feature_column_v2
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_embedding
from tensorflow.python.tpu import tpu_embedding_for_serving
from tensorflow.python.tpu import tpu_embedding_v2
from tensorflow.python.tpu import tpu_embedding_v2_utils
from tensorflow.python.tpu import tpu_hardware_feature
from tensorflow.python.tpu import tpu_optimizer
# pylint: enable=unused-import
| [
"[email protected]"
] | |
2ee0088396c8b6299c327c0d2faefc2d53d9a962 | f0cec246e2f30f6b4ee5656f1cb6406dd0f7879a | /thingsboard_client/models/rule_node_id.py | cad448df1ff67cb7c21896a0d4480fd703c9bb39 | [] | no_license | ascentio-tech/thingsboard-swagger-client | 4e2f7c943e243ec8505c32dab0aa3d6cf1559105 | 1e8bf7664c281c29612fd5b44261f049ca7c44fd | refs/heads/master | 2021-07-20T07:18:12.969459 | 2020-06-17T02:35:54 | 2020-06-17T02:35:54 | 184,322,192 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,040 | py | # coding: utf-8
"""
Thingsboard REST API
For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>. # noqa: E501
OpenAPI spec version: 2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleNodeId(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str'
}
attribute_map = {
'id': 'id'
}
def __init__(self, id=None): # noqa: E501
"""RuleNodeId - a model defined in Swagger""" # noqa: E501
self._id = None
self.discriminator = None
if id is not None:
self.id = id
@property
def id(self):
"""Gets the id of this RuleNodeId. # noqa: E501
:return: The id of this RuleNodeId. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this RuleNodeId.
:param id: The id of this RuleNodeId. # noqa: E501
:type: str
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RuleNodeId, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleNodeId):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
af566ecfbb44440a1ec164059caab18a664f4f21 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/fdlm_chordrec/chordrec-master/experiments/mlsp2016/create_crf_init_params.py | 989cab4c857b5f28322c54b30be3b0271045634f | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 1,285 | py | import numpy as np
import pickle
import os
from docopt import docopt
from glob import glob
from os.path import join, exists
USAGE = """
create_crf_init_params.py - creates initial crf parameters from a learned
gap convnet.
Usage:
create_crf_init_params.py <src_dir> <dst_dir>
Arguments:
<src_dir> directory containing the CNN parameter files for each fold
<dst_dir> directory where to store the initial CRF parameters
"""
args = docopt(USAGE)
param_files = glob(join(args['<src_dir>'], 'params*.pkl'))
if not exists(args['<dst_dir>']):
os.makedirs(args['<dst_dir>'])
for fold, pfile in enumerate(param_files):
params = pickle.load(open(pfile))
conv, beta, gamma, mean, inv_std = params[-5:]
c = (beta - mean * gamma * inv_std)
W = (conv.reshape(conv.shape[:2]) * gamma[:, np.newaxis] *
inv_std[:, np.newaxis]).T
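# (illustrative note, added) The two assignments above fold the batch-norm
# statistics into the preceding convolution (reshaped to a plain matrix):
# for y = gamma * (W_conv x - mean) * inv_std + beta, the equivalent affine
# form is y = W x + c with
#   W = (gamma * inv_std * W_conv)^T   and   c = beta - mean * gamma * inv_std,
# which is what `W` and `c` hold; they serve as the CRF's initial unary
# parameters, while pi, tau and A below are initialised to zero.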
pi = np.zeros_like(c)
tau = np.zeros_like(c)
A = np.zeros((len(beta), len(beta)))
dst_file = join(args['<dst_dir>'], 'crf_init_params_{}.pkl'.format(fold))
pickle.dump([pi.astype(np.float32),
tau.astype(np.float32),
c.astype(np.float32),
A.astype(np.float32),
W.astype(np.float32)], open(dst_file, 'w'))
| [
"[email protected]"
] | |
9f531e0eaf6f4b7c54ba7928cb1cfa50714bfe90 | 49dbac90edc329416525825e842382470345641d | /tests/medium/test_num_sum.py | 13f451161779c5dc0a928281932484afcf2ce90a | [] | no_license | AleksTor/belhard_6_tasks | 4a9ed9367978a2086531a9e31f2d1c71a17446b6 | f5f77a622bbb020a01d251423cbb5f25cc6239f3 | refs/heads/master | 2023-07-26T19:10:22.957008 | 2021-09-13T02:19:28 | 2021-09-13T02:19:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | import pytest
from tasks.medium.num_sum import sum_of_numbers
@pytest.mark.parametrize(
"n, expected", (
(123, 6),
(1111111, 7),
(4, 4),
(935145, 27),
)
)
def test_sum_of_numbers(n, expected):
assert sum_of_numbers(n) == expected
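# --- illustrative sketch (assumption): the function under test --------------
# The real implementation lives in tasks/medium/num_sum.py and is not shown
# in this file; a minimal digit-sum version consistent with the cases above
# could look like:
#
#   def sum_of_numbers(n: int) -> int:
#       return sum(int(digit) for digit in str(n))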
| [
"[email protected]"
] | |
ef65d624ada369ecd60fd78c94bf7905807cefb2 | 2af5f89257e268b63d66a29287a6290c40125372 | /Array/Leetcode_JZO_74_medium_二维数组的寻找.py | 1fca5bff8d82796caede9ab8f694f070255a3fa1 | [] | no_license | lilitom/Leetcode-problems | 7dea24a1d07b3ee49e2f90764330f3e695f4f64d | 82901a31c558433478dd23026efda63cf4dae8e5 | refs/heads/master | 2020-12-02T22:39:34.812479 | 2018-10-21T11:38:31 | 2018-10-21T11:38:31 | 96,162,066 | 2 | 2 | null | 2017-07-05T13:06:18 | 2017-07-04T01:07:41 | Python | GB18030 | Python | false | false | 1,164 | py | '''
Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
Integers in each row are sorted from left to right.
The first integer of each row is greater than the last integer of the previous row.
For example,
Consider the following matrix:
[
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
]
Given target = 3, return true.
'''
#South China University of Technology
#Author:Guohao
#coding=utf-8
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
if not matrix:
return False
row,cols=len(matrix),len(matrix[0])
low,high=0,row*cols-1
while low<=high: #注意这里要有等号的
mid=(low+high)/2
num=matrix[mid/cols][mid%cols]
if num==target:
return True
if num<target:
low=mid+1
else:
high=mid-1
return False
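# --- illustrative usage (addition), mirroring the example in the docstring --
if __name__ == '__main__':
    sample = [[1, 3, 5, 7],
              [10, 11, 16, 20],
              [23, 30, 34, 50]]
    solver = Solution()
    print(solver.searchMatrix(sample, 3))   # True
    print(solver.searchMatrix(sample, 15))  # False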
# Reference:
#https://leetcode.com/problems/search-a-2d-matrix/description/ | [
"[email protected]"
] | |
ffc51a551202748e2b844689ae88726dc0f22a93 | b310ea0f7a60786f7e17ac80154fcd5f2c3ffd29 | /Entity/Entity.py | 756f25faf1a3f34fce0ad9b1028e4b5531edb829 | [
"MIT"
] | permissive | twtrubiks/Flask-Migrate-Tutorial | 464e475e7ed8bd5127420c819b4672dc15f9ba61 | a659171eee4c84fd0ea1a1e39914c53d44177ea8 | refs/heads/master | 2021-01-11T22:42:04.620084 | 2017-02-15T09:49:45 | 2017-02-15T09:49:45 | 79,018,103 | 7 | 6 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
class PictureDate(db.Model):
__tablename__ = 'PictureDate'
Id = db.Column(db.Integer, primary_key=True)
Uuid = db.Column(db.String(64), unique=True)
Title = db.Column(db.String(64))
Description = db.Column(db.String(128))
if __name__ == '__main__':
manager.run()
| [
"[email protected]"
] | |
97210ea3fec9b92d035e5e3c826327e710361fdc | 3d0ae7c8693463faa11bacad8e6ea9d0d70b9eb1 | /pytools/utilities/python/tklib_image_utils.py | fcfd1fa577b7d1e1f3361b64d84d1a8f276abf3b | [] | no_license | stefie10/slu_hri | a76f79094bd1740676fec5d889411ba3b1d9dc26 | 50753379953e1ff822162eeab094cffe4a30f3e1 | refs/heads/master | 2022-12-14T01:07:51.522258 | 2020-08-31T00:50:12 | 2020-08-31T00:50:12 | 291,386,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,553 | py | from copy import deepcopy
from scipy import *
def median_filter(myimage, filter_size):
new_map = deepcopy(myimage)
#for i in range(filter_size, len(myimage)-filter_size):
# for j in range(filter_size, len(myimage[0])-filter_size):
for i in range(len(myimage)):
for j in range(len(myimage[0])):
if(i <= filter_size or j < filter_size
or i >= len(myimage)-filter_size or j > len(myimage[0])-filter_size):
new_map[i,j]=0.0
continue
neigh = myimage[i-filter_size:i+filter_size+1,j-filter_size:j+filter_size+1]
v = median(neigh.flatten())
new_map[i,j] = v
return new_map
def skeletonize(b_map, max_num_iterations):
for i in range(max_num_iterations):
print "iteration, ", i
if(i%2 == 0):
b_map, num_deleted = skeletonize_iter(b_map, True)
else:
b_map, num_deleted = skeletonize_iter(b_map, False)
if(num_deleted == 0):
return b_map
return b_map
def skeletonize_iter(binary_map, is_step1):
new_map = deepcopy(binary_map)
num_deleted = 0
for i in range(len(binary_map)):
for j in range(len(binary_map[0])):
#print len(binary_map), len(binary_map[0])
en = binary_map[i-1:i+2,j-1:j+2]
if(i == 0 or i == len(binary_map) - 1
or j == 0 or j == len(binary_map[0]) - 1):
new_map[i,j] = 0.0
elif(binary_map[i][j] == 0.0):
continue
elif(skeletonize_is_deleted(en, is_step1)):
num_deleted +=1
new_map[i,j] = 0.0
return new_map, num_deleted
def skeletonize_is_deleted(en, is_step1=True):
if(not len(en) == 3 or not len(en[0]) == 3):
print "not an eight neighbor"
exit(1)
else:
s = sum(en)
n = skeletonize_num_transitions(en)
if(is_step1):
p1 = en[0,1]*en[1,2]*en[2,1]
p2 = en[1,2]*en[2,1]*en[1,0]
else:
p1 = en[1,0]*en[0,1]*en[1,2]
p2 = en[0,1]*en[2,1]*en[1,0]
if(s <= 7 and s >= 3 and n == 1 and p1 == 0 and p2 == 0):
return True
return False
def skeletonize_num_transitions(en):
flat_en = concatenate((en[0,:], en[1:,2], [en[2,1], en[2,0], en[1,0]]))
zero_one_count = 0
for i in range(len(flat_en)):
if(flat_en[i-1] == 0 and flat_en[i] == 1):
zero_one_count += 1
return zero_one_count
| [
"[email protected]"
] | |
c442edea8bffc494b18b504b67a661651392627c | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/ashvin/icml2020/hand/sparse/mc1.py | 91b9a0c61f14b220fae8f8cf80a48e1e0efe16a4 | [
"MIT"
] | permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,473 | py | """
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
if __name__ == "__main__":
variant = dict(
num_epochs=1001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, ],
max_log_std=0,
min_log_std=-6,
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=True,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=True,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
),
num_exps_per_instance=1,
region='us-west-2',
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
search_space = {
'env': ["pen-sparse-v0", "relocate-sparse-v0", "hammer-sparse-v0", "door-sparse-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.1, 1, 10, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [0.0, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
trainer_kwargs = variant["trainer_kwargs"]
if not (trainer_kwargs["reparam_weight"] == 0 and trainer_kwargs["awr_weight"] == 0 and trainer_kwargs["bc_weight"] == 0):
variants.append(variant)
run_variants(experiment, variants, run_id=0)
| [
"[email protected]"
] | |
93dc4b460365f00007184adbfdfea6b17f6f08bb | 066ee4df594a5dc90335d271b9d5a1b1e2a4d34c | /y/google-cloud-sdk/platform/gcutil/lib/google_api_python_client/oauth2client/django_orm.py | 3607bb9442651745171963eb5d3b141bae8d4add | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT",
"Python-2.0"
] | permissive | ychen820/microblog | a2d82447525325ec58285c2e5db58b79cceaca1b | d379afa2db3582d5c3be652165f0e9e2e0c154c6 | refs/heads/master | 2021-01-20T05:58:48.424357 | 2015-04-28T22:03:09 | 2015-04-28T22:03:09 | 32,948,331 | 0 | 2 | BSD-3-Clause | 2020-07-25T05:04:35 | 2015-03-26T19:45:07 | Python | UTF-8 | Python | false | false | 3,782 | py | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
if 'null' not in kwargs:
kwargs['null'] = True
super(CredentialsField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, oauth2client.client.Credentials):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
return base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
if 'null' not in kwargs:
kwargs['null'] = True
super(FlowField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, oauth2client.client.Flow):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
return base64.b64encode(pickle.dumps(value))
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from
the datastore.
This Storage helper presumes the Credentials
have been stored as a CredenialsField
on a db model class.
"""
def __init__(self, model_class, key_name, key_value, property_name):
"""Constructor for Storage.
Args:
model: db.Model, model class
key_name: string, key name for the entity that has the credentials
key_value: string, key value for the entity that has the credentials
      property_name: string, name of the property that is a CredentialsProperty
"""
self.model_class = model_class
self.key_name = key_name
self.key_value = key_value
self.property_name = property_name
def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
credential = None
query = {self.key_name: self.key_value}
entities = self.model_class.objects.filter(**query)
if len(entities) > 0:
credential = getattr(entities[0], self.property_name)
if credential and hasattr(credential, 'set_store'):
credential.set_store(self)
return credential
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
args = {self.key_name: self.key_value}
entity = self.model_class(**args)
setattr(entity, self.property_name, credentials)
entity.save()
def locked_delete(self):
"""Delete Credentials from the datastore."""
query = {self.key_name: self.key_value}
entities = self.model_class.objects.filter(**query).delete()
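# --- usage sketch (illustrative addition, not part of the original module) --
# Typical wiring in a Django project; the model and field names below are
# assumptions for illustration only. get()/put() are inherited from
# BaseStorage and call the locked_* methods above.
#
#   from django.contrib.auth.models import User
#   from django.db import models
#
#   class CredentialsModel(models.Model):
#       id = models.ForeignKey(User, primary_key=True)
#       credential = CredentialsField()
#
#   storage = Storage(CredentialsModel, 'id', request.user, 'credential')
#   credential = storage.get()      # None if nothing has been stored yet
#   # ... run an OAuth 2.0 flow, then persist it:
#   # storage.put(new_credentials)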
| [
"[email protected]"
] | |
3e8fa85fb7afd03c45f83c311743c88e5e962eb2 | fd7406dcf3f898bd5b82a0ee4306e02c1d1b4970 | /backup/testcase/operation_testcase/testWebBasicDataOperation.py | d50339d1639b7162ead6e2eccf1c21f0aedb5275 | [] | no_license | 18786262315/zidonghuaapi | e5f979fc599f8bca08e7e5cfbd58943fe36b75d3 | 28145f13231e9df76a33894b82c0e552afb485fc | refs/heads/master | 2020-03-26T01:48:13.018859 | 2018-08-11T12:32:51 | 2018-08-11T12:32:51 | 144,383,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | # -*- coding: utf8 -*-
import unittest
from AthenaTest.common.loadDatas import *
import sys
import importlib
importlib.reload(sys)
class testWebBasicDataOperation(unittest.TestCase):
def setUp(self):
self.datafile = '../../datacase/operation_datacase/web_basic_data_operation.xls'
def test_merchantInfoOperation(self):
        '''Merchant data test'''
datafile = self.datafile
loadDatas(self, datafile, u'merchantInfoOperation')
def test_warehouseInfoOperation(self):
        '''Receiving warehouse data test'''
datafile = self.datafile
loadDatas(self, datafile, u'warehouseInfoOperation')
def test_trunkInfoOperation(self):
        '''Logistics data test'''
datafile = self.datafile
loadDatas(self, datafile, u'trunkInfoOperation')
def test_branchInfoOperation(self):
        '''Branch (outlet) data test'''
datafile = self.datafile
loadDatas(self, datafile, u'branchInfoOperation')
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
5e90f53f83015074277634b94893dfe780aae9c0 | 32dbb74f03c7450ee1f3166f82260e60272f57e0 | /pushbase/session_recording_component.py | 7816971cab9b893aed9788a232cbc81f96f4be65 | [] | no_license | cce/buttons10 | 61555bc767f2bd300bfffb373f9feaae96b83ca7 | 6f1137c96eead0b9771ad8ec9327dd72ada2e916 | refs/heads/master | 2021-04-15T09:45:39.684764 | 2018-03-24T04:29:52 | 2018-03-24T04:29:52 | 126,565,725 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,429 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/pushbase/session_recording_component.py
from __future__ import absolute_import, print_function, unicode_literals
from itertools import ifilter
import Live
from ableton.v2.base import listens, liveobj_valid, EventObject
from ableton.v2.control_surface.control import ButtonControl
from ableton.v2.control_surface.components import SessionRecordingComponent
from .consts import MessageBoxText
from .message_box_component import Messenger
Quantization = Live.Song.Quantization
def track_can_overdub(track):
return not track.has_audio_input
def track_can_record(track):
return track.can_be_armed and (track.arm or track.implicit_arm)
def get_clip_slot_from_index(song, track, clip_slot_index):
clip_slots = track.clip_slots
if clip_slot_index < len(clip_slots):
return clip_slots[clip_slot_index]
else:
return None
def have_other_recording_clips(tracks, recording_clip):
for track in ifilter(lambda t: t.can_be_armed and (t.arm or t.implicit_arm), tracks):
index = track.playing_slot_index
slot = track.clip_slots[index] if 0 <= index < len(track.clip_slots) else None
clip = getattr(slot, u'clip', None)
if getattr(clip, u'is_recording', False) and clip is not recording_clip:
return True
return False
class FixedLengthRecording(EventObject):
def __init__(self, song = None, clip_creator = None, fixed_length_setting = None, *a, **k):
assert song is not None
assert clip_creator is not None
assert fixed_length_setting is not None
super(FixedLengthRecording, self).__init__(*a, **k)
self._song = song
self._clip_creator = clip_creator
self._fixed_length_setting = fixed_length_setting
self._clip_creator.legato_launch = self._fixed_length_setting.legato_launch
self.__on_setting_selected_index_changes.subject = self._fixed_length_setting
self.__on_setting_legato_launch_changes.subject = self._fixed_length_setting
return
def should_start_fixed_length_recording(self, clip_slot):
return track_can_record(clip_slot.canonical_parent) and not clip_slot.is_recording and not clip_slot.has_clip and self._fixed_length_setting.enabled
def start_recording_in_slot(self, track, clip_slot_index):
song = self._song
song.overdub = True
if track_can_record(track):
self._record_in_slot(track, track.clip_slots[clip_slot_index])
if not song.is_playing:
song.is_playing = True
def _is_infinite_recording(self, clip):
return not clip.is_overdubbing
def stop_recording(self, clip):
if have_other_recording_clips(self._song.tracks, clip):
clip.fire()
elif self._is_infinite_recording(clip):
self._song.session_record = False
else:
self._song.overdub = False
def _record_in_slot(self, track, clip_slot):
if self.should_start_fixed_length_recording(clip_slot):
length, quant = self._fixed_length_setting.get_selected_length(self._song)
if self._song.is_playing:
quant = self._song.clip_trigger_quantization
if track_can_overdub(track):
self._clip_creator.create(clip_slot, length=length, launch_quantization=self._song.clip_trigger_quantization)
else:
clip_slot.fire(record_length=length, launch_quantization=quant)
elif not clip_slot.is_playing or not self._song.is_playing:
if clip_slot.has_clip:
clip_slot.stop()
clip_slot.fire(force_legato=True, launch_quantization=Quantization.q_no_q)
else:
clip_slot.fire()
@listens(u'selected_index')
def __on_setting_selected_index_changes(self, _):
length, _ = self._fixed_length_setting.get_selected_length(self._song)
self._clip_creator.fixed_length = length
@listens(u'legato_launch')
def __on_setting_legato_launch_changes(self, value):
self._clip_creator.legato_launch = value
class FixedLengthSessionRecordingComponent(SessionRecordingComponent, Messenger):
foot_switch_button = ButtonControl()
arrangement_record_button = ButtonControl()
capture_midi_button = ButtonControl()
def __init__(self, clip_creator = None, fixed_length_setting = None, *a, **k):
assert clip_creator is not None
assert fixed_length_setting is not None
super(FixedLengthSessionRecordingComponent, self).__init__(*a, **k)
self._fixed_length_recording = self.register_disconnectable(FixedLengthRecording(song=self.song, clip_creator=clip_creator, fixed_length_setting=fixed_length_setting))
self.footswitch_toggles_arrangement_recording = False
self.__on_record_mode_changed.subject = self.song
self.__on_record_mode_changed()
self.set_trigger_recording_on_release(not any((self._record_button.is_pressed, self.arrangement_record_button.is_pressed)))
return
def set_trigger_recording_on_release(self, trigger_recording):
self._should_trigger_recording = trigger_recording
@foot_switch_button.pressed
def foot_switch_button(self, button):
if self.footswitch_toggles_arrangement_recording:
self._toggle_arrangement_recording()
else:
self._trigger_recording()
@capture_midi_button.pressed
def capture_midi_button(self, button):
try:
self.song.capture_midi()
self.set_trigger_recording_on_release(not any((self._record_button.is_pressed, self.arrangement_record_button.is_pressed)))
except RuntimeError:
pass
@arrangement_record_button.pressed
def arrangement_record_button(self, _):
self._on_arrangement_record_button_pressed()
@arrangement_record_button.released
def arrangement_record_button(self, _):
self._on_arrangement_record_button_released()
def _toggle_arrangement_recording(self):
self.song.record_mode = not self.song.record_mode
def _on_record_button_pressed(self):
pass
def _on_record_button_released(self):
self._trigger_recording_action(self._trigger_recording)
def _on_arrangement_record_button_pressed(self):
pass
def _on_arrangement_record_button_released(self):
self._trigger_recording_action(self._toggle_arrangement_recording)
def _trigger_recording_action(self, recording_action):
if self._should_trigger_recording:
recording_action()
self._should_trigger_recording = True
def _clip_slot_index_to_record_into(self):
song = self.song
selected_scene = song.view.selected_scene
return list(song.scenes).index(selected_scene)
def _update_record_button(self):
if self.is_enabled():
song = self.song
clip_slot = get_clip_slot_from_index(song, song.view.selected_track, self._clip_slot_index_to_record_into())
if liveobj_valid(clip_slot) and clip_slot.is_triggered and song.overdub and not clip_slot.is_recording:
self._record_button.color = u'Recording.Transition'
elif song.record_mode:
self._record_button.color = u'Recording.ArrangementRecordingOn'
else:
super(FixedLengthSessionRecordingComponent, self)._update_record_button()
self.arrangement_record_button.color = self._record_button.color
@listens(u'record_mode')
def __on_record_mode_changed(self):
self._update_record_button()
def _start_recording(self):
track = self.song.view.selected_track
clip_slot_index = self._clip_slot_index_to_record_into()
self._fixed_length_recording.start_recording_in_slot(track, clip_slot_index)
if track_can_record(track):
self._ensure_slot_is_visible(self.song.view.selected_track, clip_slot_index)
def _ensure_slot_is_visible(self, track, scene_index):
song = self.song
if song.view.selected_track == track:
song.view.selected_scene = song.scenes[scene_index]
self._view_selected_clip_detail()
def _handle_limitation_error_on_scene_creation(self):
self.expect_dialog(MessageBoxText.SCENE_LIMIT_REACHED) | [
"[email protected]"
] | |
63ef7efb72cdbd6ab053b45a1e1b67b28b822140 | 3bb57eb1f7c1c0aced487e7ce88f3cb84d979054 | /reliability/scripts/selectors/user_study_sgss/Run_All_Grammaticality.py | e9e24f48f1582714451889b5453d7e2feb86cc90 | [] | no_license | ghpaetzold/phd-backup | e100cd0bbef82644dacc73a8d1c6b757b2203f71 | 6f5eee43e34baa796efb16db0bc8562243a049b6 | refs/heads/master | 2020-12-24T16:41:21.490426 | 2016-04-23T14:50:07 | 2016-04-23T14:50:07 | 37,981,094 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | import os
#Generators:
generators = os.listdir('../../../substitutions/')
#generators.remove('all')
#Parameters:
ks = ['all']
criterions = ['gini']
splitters = ['best']
maxes = ['sqrt']
depths = ['None']
for c in criterions:
for s in splitters:
for m in maxes:
for d in depths:
for k in ks:
for generator in generators:
trainset = '/export/data/ghpaetzold/user_study_sgss/datasets/grammaticality_victor_all_optimistic.txt'
testset = '../../../substitutions/'
testset += generator + '/substitutions_void.txt'
output = '../../../substitutions/' + generator + '/'
output += 'substitutions_GrammaticalityUS_'+c+'_'+s+'_'+m+'_'+d+'_'+k+'.txt'
comm = 'nohup python Run_Grammaticality.py '+trainset+' '+k+' '+c+' '+s+' '+m+' '+d+' '+testset+' '+output+' &'
os.system(comm)
| [
"[email protected]"
] | |
6e0ebec1fc78e0835c6ecbdb7219e01a09843027 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_disobeying.py | 6227a9b226dae0e7d81f3e25281a295fcd1d7404 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
# class header
class _DISOBEYING():
def __init__(self,):
self.name = "DISOBEYING"
		self.definitions = ["disobey"]  # quoted: the original referenced the undefined bare name `disobey`
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['disobey']
| [
"[email protected]"
] | |
3bab7c812b73262b1fec5b2de5bb8c2dabbea041 | 4b7e282fe480415f5d52c0fc0429f144156190fe | /google/ads/googleads/v8/enums/types/frequency_cap_time_unit.py | a0b4fac51b42f479dc71ca2908a0ddc7c56457ba | [
"Apache-2.0"
] | permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 1,190 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.enums",
marshal="google.ads.googleads.v8",
manifest={"FrequencyCapTimeUnitEnum",},
)
class FrequencyCapTimeUnitEnum(proto.Message):
r"""Container for enum describing the unit of time the cap is
defined at.
"""
class FrequencyCapTimeUnit(proto.Enum):
r"""Unit of time the cap is defined at (e.g. day, week)."""
UNSPECIFIED = 0
UNKNOWN = 1
DAY = 2
WEEK = 3
MONTH = 4
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"[email protected]"
] | |
2ea81a70f5853392681a2ad044988336259077ca | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv2/lib/python3.8/site-packages/ansible/modules/network/netvisor/pn_vrouterif.py | 7f17778186a113d7de769d3a5baf96d4f0e33de6 | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 15,340 | py | #!/usr/bin/python
""" PN-CLI vrouter-interface-add/remove/modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_vrouterif
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to add/remove/modify vrouter-interface.
description:
- Execute vrouter-interface-add, vrouter-interface-remove,
vrouter-interface-modify command.
- You configure interfaces to vRouter services on a fabric, cluster,
standalone switch or virtual network(VNET).
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch to run the cli on.
required: False
state:
description:
- State the action to perform. Use 'present' to add vrouter interface,
'absent' to remove vrouter interface and 'update' to modify vrouter
interface.
required: True
choices: ['present', 'absent', 'update']
pn_vrouter_name:
description:
- Specify the name of the vRouter interface.
required: True
pn_vlan:
description:
- Specify the VLAN identifier. This is a value between 1 and 4092.
pn_interface_ip:
description:
- Specify the IP address of the interface in x.x.x.x/n format.
pn_assignment:
description:
- Specify the DHCP method for IP address assignment.
choices: ['none', 'dhcp', 'dhcpv6', 'autov6']
pn_vxlan:
description:
- Specify the VXLAN identifier. This is a value between 1 and 16777215.
pn_interface:
description:
- Specify if the interface is management, data or span interface.
choices: ['mgmt', 'data', 'span']
pn_alias:
description:
- Specify an alias for the interface.
pn_exclusive:
description:
- Specify if the interface is exclusive to the configuration. Exclusive
means that other configurations cannot use the interface. Exclusive is
specified when you configure the interface as span interface and allows
higher throughput through the interface.
pn_nic_enable:
description:
- Specify if the NIC is enabled or not
pn_vrrp_id:
description:
- Specify the ID for the VRRP interface. The IDs on both vRouters must be
the same IS number.
pn_vrrp_priority:
description:
- Specify the priority for the VRRP interface. This is a value between
1 (lowest) and 255 (highest).
pn_vrrp_adv_int:
description:
- Specify a VRRP advertisement interval in milliseconds. The range is
from 30 to 40950 with a default value of 1000.
pn_l3port:
description:
- Specify a Layer 3 port for the interface.
pn_secondary_macs:
description:
- Specify a secondary MAC address for the interface.
pn_nic_str:
description:
- Specify the type of NIC. Used for vrouter-interface remove/modify.
"""
EXAMPLES = """
- name: Add vrouter-interface
pn_vrouterif:
pn_cliusername: admin
pn_clipassword: admin
state: 'present'
pn_vrouter_name: 'ansible-vrouter'
pn_interface_ip: 101.101.101.2/24
pn_vlan: 101
- name: Add VRRP..
pn_vrouterif:
pn_cliusername: admin
pn_clipassword: admin
state: 'present'
pn_vrouter_name: 'ansible-vrouter'
pn_interface_ip: 101.101.101.2/24
pn_vrrp_ip: 101.101.101.1/24
pn_vrrp_priority: 100
pn_vlan: 101
- name: Remove vrouter-interface
pn_vrouterif:
pn_cliusername: admin
pn_clipassword: admin
state: 'absent'
pn_vrouter_name: 'ansible-vrouter'
pn_interface_ip: 101.101.101.2/24
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the vrouterif command.
returned: on success
type: list
stderr:
description: The set of error responses from the vrouterif command.
returned: on error
type: str
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
VROUTER_EXISTS = None
INTERFACE_EXISTS = None
NIC_EXISTS = None
VRRP_EXISTS = None
def pn_cli(module):
"""
This method is to generate the cli portion to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
def check_cli(module, cli):
"""
This method checks if vRouter exists on the target node.
This method also checks for idempotency using the vrouter-interface-show
command.
If the given vRouter exists, return VROUTER_EXISTS as True else False.
If an interface with the given ip exists on the given vRouter,
return INTERFACE_EXISTS as True else False. This is required for
vrouter-interface-add.
If nic_str exists on the given vRouter, return NIC_EXISTS as True else
False. This is required for vrouter-interface-remove.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS
"""
vrouter_name = module.params['pn_vrouter_name']
interface_ip = module.params['pn_interface_ip']
nic_str = module.params['pn_nic_str']
# Global flags
global VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS
# Check for vRouter
check_vrouter = cli + ' vrouter-show format name no-show-headers '
check_vrouter = shlex.split(check_vrouter)
out = module.run_command(check_vrouter)[1]
out = out.split()
if vrouter_name in out:
VROUTER_EXISTS = True
else:
VROUTER_EXISTS = False
if interface_ip:
# Check for interface and VRRP and fetch nic for VRRP
show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name
show += 'ip %s format ip,nic no-show-headers' % interface_ip
show = shlex.split(show)
out = module.run_command(show)[1]
if out:
INTERFACE_EXISTS = True
else:
INTERFACE_EXISTS = False
if nic_str:
# Check for nic
show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name
show += ' format nic no-show-headers'
show = shlex.split(show)
out = module.run_command(show)[1]
if nic_str in out:
NIC_EXISTS = True
else:
NIC_EXISTS = False
def get_nic(module, cli):
"""
This module checks if VRRP interface can be added. If No, return VRRP_EXISTS
as True.
If Yes, fetch the nic string from the primary interface and return nic and
VRRP_EXISTS as False.
:param module:
:param cli:
:return: nic, Global Boolean: VRRP_EXISTS
"""
vrouter_name = module.params['pn_vrouter_name']
interface_ip = module.params['pn_interface_ip']
global VRRP_EXISTS
# Check for interface and VRRP and fetch nic for VRRP
show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name
show += 'ip %s format ip,nic no-show-headers' % interface_ip
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
if len(out) > 3:
VRRP_EXISTS = True
return None
else:
nic = out[2]
VRRP_EXISTS = False
return nic
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'vrouter-interface-add'
if state == 'absent':
command = 'vrouter-interface-remove'
if state == 'update':
command = 'vrouter-interface-modify'
return command
def main():
""" This portion is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
state=dict(required=True, type='str',
choices=['present', 'absent']),
pn_vrouter_name=dict(required=True, type='str'),
pn_vlan=dict(type='int'),
pn_interface_ip=dict(required=True, type='str'),
pn_assignment=dict(type='str',
choices=['none', 'dhcp', 'dhcpv6', 'autov6']),
pn_vxlan=dict(type='int'),
pn_interface=dict(type='str', choices=['mgmt', 'data', 'span']),
pn_alias=dict(type='str'),
pn_exclusive=dict(type='bool'),
pn_nic_enable=dict(type='bool'),
pn_vrrp_id=dict(type='int'),
pn_vrrp_priority=dict(type='int'),
pn_vrrp_adv_int=dict(type='str'),
pn_l3port=dict(type='str'),
pn_secondary_macs=dict(type='str'),
pn_nic_str=dict(type='str')
),
required_if=(
["state", "present",
["pn_vrouter_name", "pn_interface_ip"]],
["state", "absent",
["pn_vrouter_name", "pn_nic_str"]]
),
)
# Accessing the arguments
state = module.params['state']
vrouter_name = module.params['pn_vrouter_name']
vlan = module.params['pn_vlan']
interface_ip = module.params['pn_interface_ip']
assignment = module.params['pn_assignment']
vxlan = module.params['pn_vxlan']
interface = module.params['pn_interface']
alias = module.params['pn_alias']
exclusive = module.params['pn_exclusive']
nic_enable = module.params['pn_nic_enable']
vrrp_id = module.params['pn_vrrp_id']
vrrp_priority = module.params['pn_vrrp_priority']
vrrp_adv_int = module.params['pn_vrrp_adv_int']
l3port = module.params['pn_l3port']
secondary_macs = module.params['pn_secondary_macs']
nic_str = module.params['pn_nic_str']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
check_cli(module, cli)
if command == 'vrouter-interface-add':
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if vrrp_id:
vrrp_primary = get_nic(module, cli)
if VRRP_EXISTS is True:
module.exit_json(
skipped=True,
msg=('VRRP interface on %s already exists. Check '
'the IP addresses' % vrouter_name)
)
cli += ' %s vrouter-name %s ' % (command, vrouter_name)
cli += (' ip %s vrrp-primary %s vrrp-id %s '
% (interface_ip, vrrp_primary, str(vrrp_id)))
if vrrp_priority:
cli += ' vrrp-priority %s ' % str(vrrp_priority)
if vrrp_adv_int:
cli += ' vrrp-adv-int %s ' % vrrp_adv_int
else:
if INTERFACE_EXISTS is True:
module.exit_json(
skipped=True,
msg=('vRouter interface on %s already exists. Check the '
'IP addresses' % vrouter_name)
)
cli += ' %s vrouter-name %s ' % (command, vrouter_name)
cli += ' ip %s ' % interface_ip
if vlan:
cli += ' vlan ' + str(vlan)
if l3port:
cli += ' l3-port ' + l3port
if assignment:
cli += ' assignment ' + assignment
if vxlan:
cli += ' vxlan ' + str(vxlan)
if interface:
cli += ' if ' + interface
if alias:
cli += ' alias-on ' + alias
if exclusive is True:
cli += ' exclusive '
if exclusive is False:
cli += ' no-exclusive '
if nic_enable is True:
cli += ' nic-enable '
if nic_enable is False:
cli += ' nic-disable '
if secondary_macs:
cli += ' secondary-macs ' + secondary_macs
if command == 'vrouter-interface-remove':
if VROUTER_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter %s does not exist' % vrouter_name
)
if NIC_EXISTS is False:
module.exit_json(
skipped=True,
msg='vRouter interface with nic %s does not exist' % nic_str
)
cli += ' %s vrouter-name %s nic %s ' % (command, vrouter_name, nic_str)
run_cli(module, cli)
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
6e591a5e888418ed9a9c93a7560a9da591c60be4 | 5537eec7f43098d216d2b550678c8d10b2a26f09 | /venv/tower/lib/python2.7/site-packages/awx/main/migrations/0033_v303_v245_host_variable_fix.py | fad3545b65c1e39e6a6594561e71708a6219f49d | [] | no_license | wipro-sdx/Automation | f0ae1512b8d9d491d7bacec94c8906d06d696407 | a8c46217d0fbe51a71597b5db87cbe98ed19297a | refs/heads/master | 2021-07-08T11:09:05.314435 | 2018-05-02T07:18:54 | 2018-05-02T07:18:54 | 131,812,982 | 0 | 1 | null | 2020-07-23T23:22:33 | 2018-05-02T07:15:28 | Python | UTF-8 | Python | false | false | 661 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from awx.main.migrations import _migration_utils as migration_utils
def update_dashed_host_variables(apps, schema_editor):
Host = apps.get_model('main', 'Host')
for host in Host.objects.filter(variables='---'):
host.variables = ''
host.save()
class Migration(migrations.Migration):
dependencies = [
('main', '0032_v302_credential_permissions_update'),
]
operations = [
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
migrations.RunPython(update_dashed_host_variables),
]
| [
"[email protected]"
] | |
c75af28c9291936a6161711ddbd4d4aa46c5acd0 | 99efa551de2a4586767d109b211d8c016c5454c4 | /Set/App1/H.py | 6adf6ff5e0459bf9ae5fdbe1dfe73ce7b57f8dd0 | [] | no_license | Vijay-Ky/Anit_Rooman_Python_Training | 9decf34b651ea8455bdd4cdf1e2239a77cf1bc7e | 155ba84620c28fd64a7219013c3bdd43f76fa278 | refs/heads/master | 2020-05-31T23:22:45.865104 | 2019-06-18T09:33:10 | 2019-06-18T09:33:10 | 190,537,707 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | myset = {"apple", "banana", "cherry"}
# remove() would raise KeyError because "orange" is not in the set; discard() removes safely
myset.discard("orange")
| [
"[email protected]"
] | |
99fce1ecb787c18902799b2a92f8c625db4e2b4d | 342c87b969201085292187020e84e39b3464dd87 | /20200326-clojure/patterns/01_functions_first_class_python_solution.py | fa1dfeb80a5a094afb434e74ce48343a20b8ae70 | [] | no_license | shimf/webinar-live-demos | de5ab2d57c2cc4d6a51d7eeed045e21b4a52fe1a | 50f86b17994ed0393c04c46242516b5df5cef2f7 | refs/heads/master | 2022-08-26T21:39:43.883045 | 2020-05-21T07:52:31 | 2020-05-21T07:52:31 | 265,782,740 | 0 | 0 | null | 2020-05-21T07:23:31 | 2020-05-21T07:23:30 | null | UTF-8 | Python | false | false | 123 | py | from functools import partial
from operator import mul
nthice = [partial(mul, n) for n in range(10)]
print(nthice[2](3))
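# partial(mul, n) freezes mul's first argument, so nthice[2] is "multiply by 2"
# and nthice[2](3) prints 6.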
| [
"[email protected]"
] | |
7540c67ee87e4363953e1932e1e8090ac2b4bef0 | 444079944e66d19dd8a01fe9834b43d7aa5fc427 | /DRRQueue.py | 0083c37ff00f549cd00373f0ef062d82bde4f67b | [
"Apache-2.0"
] | permissive | dound/vns | 6ea2a6560a87fc86c6533a7e4532d61af908894d | 675c581fa18b57b801782079ed33f7b095898c7f | refs/heads/master | 2016-09-05T23:17:28.078179 | 2011-04-30T20:40:25 | 2011-04-30T20:40:25 | 288,103 | 20 | 9 | null | 2020-02-01T01:25:56 | 2009-08-25T21:54:12 | Python | UTF-8 | Python | false | false | 2,958 | py | """The DDRQueue module implements a Queue subclass for use with Deficit Round
Robin (DRR) like algorithms."""
from Queue import Queue, Empty
class DRRQueue(Queue):
"""Implements a queue for use with a DRR-like algorithm. Each queue tracks
"quanta" available to it (some unit of work - for the original DRR, this was
the number of bytes which could be sent). start_service() is used to
initiate a new round of service on the queue. task_done() should be called
each time a "job" from the queue is finished so that the appropriate quanta
can be deducted. When task_done() returns None, then no more quanta are
available for jobs from this queue this round.
Like the original, leftover quanta are only maintained if the queue is
non-empty. Unlike the original, jobs are run until the quanta available is
less than or equal to zero.
put() or put_nowait() can be used to add jobs to the queue.
Note: This queue can be used with ordinary round robin scheme by making the
quantum 1 and always calling task_done() with quanta_used=1.
"""
def __init__(self, maxsize=0, quantum=1):
"""Creates a simple JobQueue. Use put_nowait() to add jobs to the
queue."""
Queue.__init__(self, maxsize)
self.deficit_counter = 0 # number of "quanta" which are available for use
self.quantum = quantum # how much "quanta" to add each round of service
def start_service(self, quantum=None):
"""Allocates a new quantum to this queue and returns the next job from
this queue if sufficient quanta are available to this queue. The quanta
added will be self.quantum unless quantum is specified. The next job to
run is returned (if any)."""
# a new quantum of service is now available to this queue
self.deficit_counter += (self.quantum if quantum is None else quantum)
return self.__next_task()
def task_done(self, quanta_used=1):
"""Informs the queue that a job has been completed. quanta_used will be
subtracted from the amount of quanta available for jobs on this queue.
Returns the next job from this queue if sufficient quanta are available.
If sufficient quanta are not available or the queue is empty, then None
is returned."""
Queue.task_done(self)
self.deficit_counter -= quanta_used
return self.__next_task()
def __next_task(self):
"""Returns the next job from this queue if sufficient quanta are available.
If sufficient quanta are not available or the queue is empty, then None
is returned."""
if self.deficit_counter > 0:
try:
return self.get_nowait()
except Empty:
# when the queue empties, any leftover quanta are lost
self.deficit_counter = 0
# no jobs OR insufficient quanta are left
return None
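# --- Minimal usage sketch (added for illustration; not part of the original module) ---
# Assumes each job is a plain string and each finished job costs exactly 1 quantum.
if __name__ == '__main__':
    q = DRRQueue(quantum=2)
    for job_name in ('parse', 'render', 'upload'):
        q.put_nowait(job_name)
    job = q.start_service()                # new round: grants 2 quanta, returns the first job
    while job is not None:
        print('serving %s' % job)
        job = q.task_done(quanta_used=1)   # deduct the cost; next job only while quanta remain
    # 'upload' stays queued until the next round, i.e. the next call to start_service()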
| [
"[email protected]"
] | |
642e1e9eb6213ea382a825d9cf12a1a9552fa277 | ad8b30544480ba1e5f5b1cb2dec2aa77a644e8d2 | /SWEA/D2_5102_노드의거리.py | ffc560ef41043f390c247473811e0b1d849de0e1 | [] | no_license | hyunwoojeong123/Algorithm | 79abc82d944ca60342a7f8b6fc44fac20ac55123 | 0baaf3222fbbec699ffbec5d4cc680067cf293fb | refs/heads/master | 2023-07-10T18:28:51.934005 | 2021-08-18T01:51:23 | 2021-08-18T01:51:23 | 284,403,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | T = int(input())
for tc in range(1,T+1):
V,E = list(map(int,input().split()))
linked = [[False for j in range(V+1)] for i in range(V+1)]
#print('연결:')
for e in range(E):
s,g = list(map(int,input().split()))
#print(s,g)
linked[s][g] = True
linked[g][s] = True
#print('연결 끝')
S,G = list(map(int,input().split()))
dist = [-1 for v in range(V+1)]
dist[S] = 0
q = []
q.append(S)
ans = 0
while q:
pv = q.pop(0)
#print(pv,dist[pv])
if pv == G:
ans = dist[G]
break
for nv in range(1,V+1):
#print('linked[{}][{}]'.format(pv,nv),linked[nv][pv])
if not linked[nv][pv]:
continue
#print('dist[{}] < dist[{}]'.format(nv,pv),dist[nv],dist[pv])
if dist[nv] != -1:
continue
q.append(nv)
dist[nv] = dist[pv]+1
print('#{} {}'.format(tc,ans))
| [
"[email protected]"
] | |
993e1f05b6625bd9b8a9e969e2a2bc0c853cb581 | 4552bce7f09cffe1770162130896c30e481f1989 | /Tensorflow components and Linear Regression/Variables.py | 16ac2f75f07572ea4151a43503576e4e67a70924 | [] | no_license | Marius-Juston/Tensorflow_Tutorial | b953745a046e068af2eb5e803180b9fdc67b9c45 | bc2217f86f31b4722426899152c6355e973de8f5 | refs/heads/master | 2021-07-17T19:11:23.685737 | 2017-10-26T03:51:46 | 2017-10-26T03:51:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | # coding=utf-8
# In machine learning we will typically want a model that can take arbitrary inputs, such as the one above. To make
# the model trainable, we need to be able to modify the graph to get new outputs with the same input. Variables allow
# us to add trainable parameters to a graph. They are constructed with a type and initial value:
import tensorflow as tf
from Helper_Methods import open_tensorboard
W = tf.Variable(.3, dtype=tf.float32, name="weight")
b = tf.Variable(-.3, dtype=tf.float32, name="bias")
x = tf.placeholder(tf.float32)
linear_model = W * x + b
sess = tf.Session()
# Constants are initialized when you call tf.constant, and their value can never change. By contrast, variables are
# not initialized when you call tf.Variable. To initialize all the variables in a TensorFlow program,
# you must explicitly call a special operation as follows:
init = tf.global_variables_initializer()
sess.run(init)
# Since x is a placeholder, we can evaluate linear_model for several values of x simultaneously as follows:
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))
# to produce the output
# [ 0. 0.30000001 0.60000002 0.90000004]
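# (Added illustration, assuming TensorFlow 1.x.) A variable can be re-assigned after
# initialization with tf.assign, e.g. to set "better" parameters by hand:
fixW = tf.assign(W, -1.)
fixb = tf.assign(b, 1.)
sess.run([fixW, fixb])
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))
# now produces [ 0. -1. -2. -3.]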
open_tensorboard(__file__, sess)
| [
"[email protected]"
] | |
60bccdd0b292adf076dd2409f1527cfdfa80ee7b | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /q5jCspdCvmSjKE9HZ_10.py | d11123755af6974e993ef6ec8e17acbdeed16337 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
def lcm(a,b):
if a<b:
a,b=b,a
for i in range(b,a*b+1):
if i%a==0 and i%b==0:
break
return i
def lcm_of_list(numbers):
m=lcm(numbers[0],numbers[1])
for i in range(2,len(numbers)):
m=lcm(m,numbers[i])
return m
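# Example (added for illustration): lcm_of_list([4, 6, 10]) returns 60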
| [
"[email protected]"
] | |
6d8aecc8c41ff3f7b51eb4678cc55f2310d9c393 | ed7e61c8eef7fb2213adeb67557d605470c17fb3 | /ML/50-mlps/28-keras-cnn-big-filters/create_validation_curve.py | f39496852849e0b8a1383a08607ff10c06e7c771 | [] | no_license | MartinThoma/algorithms | 535840224323822f2ea6b7dd6f82a0fdd22a0ff9 | a251e9599b685dbf89c891f02d20fefd8538ead5 | refs/heads/master | 2023-02-23T17:58:10.913634 | 2023-02-21T05:58:59 | 2023-02-21T05:58:59 | 4,939,076 | 241 | 126 | null | 2023-02-16T05:16:23 | 2012-07-07T16:07:23 | Python | UTF-8 | Python | false | false | 780 | py | #!/usr/bin/env python
"""Visualize validation curve."""
import matplotlib.pyplot as plt
import pandas as pd
Y_SCALE_FACTOR = 100
# Prepare dataframe
df = pd.read_csv('log.csv', sep=';')
df = df[['epoch', 'acc', 'val_acc']]
df[['acc', 'val_acc']] = df[['acc', 'val_acc']] * Y_SCALE_FACTOR
df = df.set_index('epoch').rename(columns={'acc': 'Training Accuracy',
'val_acc': 'Validation Accuracy'})
print(df)
# Plot
fig, ax = plt.subplots()
df.plot.line(ylim=(0.75 * Y_SCALE_FACTOR, 1.00 * Y_SCALE_FACTOR),
title='Validation Curve',
ax=ax)
ax.minorticks_on() # required for minor grid
ax.grid()
ax.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.savefig('validation-curve.png', dpi=300)
| [
"[email protected]"
] | |
576b0a80224d85071aba025ea5b76af8820e020b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_132/ch16_2019_03_29_18_09_22_468927.py | b8183210d987ac4a8541d274fe872e653ee8da67 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | import math
def distancia_euclidiana(x1 , x2 , y1 , y2):
    d = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
return d | [
"[email protected]"
] | |
1cc80ab7f1cc14d254b7aa8d4c1063259b1c3fbb | 45b180004c441663bd223219f8edef8c82481be1 | /color.py | 9c26e029d1b9faa98fc5d9d1338fca7c4d1aa9db | [] | no_license | bradbann/mypy | a679e22fdd04525faf32a73934d813a45af1092f | 8bf6234be438aaf3ce2b69e4c10b2ce84eaccb98 | refs/heads/master | 2023-02-19T14:21:59.362385 | 2020-12-31T09:35:11 | 2020-12-31T09:35:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from colorama import Fore,Back,Style
from colorama import init
init()  # initialise colorama (needed on Windows consoles); the unused colorsys import was dropped
print(Fore.BLACK + 'some black text')
print(Back.BLUE + 'and with a blue background')
print(Style.DIM + 'and in dim text')
print(Style.RESET_ALL)
print('back to normal now') | [
"[email protected]"
] | |
88e97776f2dcc40e428edb17c7c0b4271f3e1855 | 4d995c70ab763e0422ed52e76a3aef9a48208023 | /tests/test_data_processors.py | e2b6a855b09d563ab4023b41385a7ba32cb9a542 | [] | no_license | QualiSystems/cloudshell-autodiscovery | b254008c3422b525cdb72d4c2716ef5771a38b8a | 9ed9e8e74fcf12cb7f7c7fd945f8bc4fae6d5755 | refs/heads/master | 2021-06-03T17:45:36.975215 | 2019-05-30T13:39:30 | 2019-05-30T13:44:48 | 83,421,029 | 1 | 1 | null | 2021-03-25T22:03:43 | 2017-02-28T10:44:13 | Rich Text Format | UTF-8 | Python | false | false | 7,409 | py | import unittest
import mock
from autodiscovery.data_processors import JsonDataProcessor
class TestJsonDataProcessor(unittest.TestCase):
def setUp(self):
self.filename = "example.json"
self.logger = mock.MagicMock()
self.json_data_processor = JsonDataProcessor(logger=self.logger)
@mock.patch("autodiscovery.data_processors.config")
@mock.patch("autodiscovery.data_processors.utils")
def test_prepare_file_path(self, utils, config):
full_path = mock.MagicMock()
utils.get_full_path.return_value = full_path
# act
result = self.json_data_processor._prepare_file_path(filename=self.filename)
# verify
self.assertEqual(result, full_path)
utils.get_full_path.assert_called_once_with(config.DATA_FOLDER, self.filename)
@mock.patch("autodiscovery.data_processors.json")
@mock.patch("autodiscovery.data_processors.open")
def test_save(self, open, json):
data = mock.MagicMock()
file_path = mock.MagicMock()
self.json_data_processor._prepare_file_path = mock.MagicMock(return_value=file_path)
# act
self.json_data_processor._save(data=data, filename=self.filename)
# verify
self.json_data_processor._prepare_file_path.assert_called_once_with(self.filename)
open.assert_called_once_with(file_path, "w")
json.dump.assert_called_once_with(data,
open().__enter__(),
indent=4,
sort_keys=True)
@mock.patch("autodiscovery.data_processors.json")
@mock.patch("autodiscovery.data_processors.open")
def test_load(self, open, json):
file_path = mock.MagicMock()
data = mock.MagicMock()
json.load.return_value = data
self.json_data_processor._prepare_file_path = mock.MagicMock(return_value=file_path)
# act
result = self.json_data_processor._load(filename=self.filename)
# verify
self.assertEqual(result, data)
self.json_data_processor._prepare_file_path.assert_called_once_with(self.filename)
open.assert_called_once_with(file_path, "r")
json.load.assert_called_once_with(open().__enter__())
@mock.patch("autodiscovery.data_processors.config")
def test_save_vendor_enterprise_numbers(self, config):
data = mock.MagicMock()
self.json_data_processor._save = mock.MagicMock()
# act
self.json_data_processor.save_vendor_enterprise_numbers(data=data)
# verify
self.json_data_processor._save.assert_called_once_with(data=data,
filename=config.VENDOR_ENTERPRISE_NUMBERS_FILE)
@mock.patch("autodiscovery.data_processors.config")
def test_load_vendor_enterprise_numbers(self, config):
"""Check that method will properly merge initial vendors config with the additional one"""
data = mock.MagicMock()
self.json_data_processor._load = mock.MagicMock(return_value=data)
# act
result = self.json_data_processor.load_vendor_enterprise_numbers()
# verify
self.assertEqual(result, data)
self.json_data_processor._load.assert_called_once_with(filename=config.VENDOR_ENTERPRISE_NUMBERS_FILE)
def test_merge_vendors_data(self):
"""Check that method will properly merge initial vendors config with the additional one"""
conf_data = [
{
"name": "Cisco",
"default_os": "IOS",
"operation_systems": [
{
"name": "IOS",
"default_model": "switch",
},
{
"name": "IOSXR",
"default_model": "router",
}
]
},
{
"name": "Raritan",
"default_prompt": "#",
"family_name": "PDU",
"model_name": "Raritan PDU",
"driver_name": "Raritan PDU Driver"
}
]
additional_data = [
{
"name": "Cisco",
"default_os": "IOS-EXTENDED",
"operation_systems": [
{
"name": "IOS-EXTENDED",
"default_model": "switch",
},
{
"name": "IOSXR",
"default_model": "switch",
}
]
},
{
"name": "Huawei",
"default_os": "VPR",
"operation_systems": [
{
"name": "VRP",
"default_model": "switch",
}
]
},
]
expected_data = [
{
"name": "Cisco",
"default_os": "IOS-EXTENDED",
"operation_systems": [
{
"name": "IOS-EXTENDED",
"default_model": "switch",
},
{
"name": "IOSXR",
"default_model": "switch",
},
{
"name": "IOS",
"default_model": "switch",
}
]
},
{
"name": "Huawei",
"default_os": "VPR",
"operation_systems": [
{
"name": "VRP",
"default_model": "switch",
}
]
},
{
"name": "Raritan",
"default_prompt": "#",
"family_name": "PDU",
"model_name": "Raritan PDU",
"driver_name": "Raritan PDU Driver"
},
]
# act
result = self.json_data_processor._merge_vendors_data(conf_data, additional_data)
# verify
self.assertEqual(result, expected_data)
@mock.patch("autodiscovery.data_processors.config")
@mock.patch("autodiscovery.data_processors.models")
def test_load_vendor_config(self, models, config):
"""Check that method will return VendorDefinitionCollection model"""
vendors_collection = mock.MagicMock()
models.VendorDefinitionCollection.return_value = vendors_collection
additional_vendors_data = mock.MagicMock()
vendors_data = mock.MagicMock()
self.json_data_processor._merge_vendors_data = mock.MagicMock()
self.json_data_processor._load = mock.MagicMock(return_value=vendors_data)
# act
result = self.json_data_processor.load_vendor_config(additional_vendors_data=additional_vendors_data)
# verify
self.assertEqual(result, vendors_collection)
self.json_data_processor._load.assert_called_once_with(filename=config.VENDORS_CONFIG_FILE)
self.json_data_processor._merge_vendors_data.assert_called_once_with(conf_data=vendors_data,
additional_data=additional_vendors_data)
| [
"[email protected]"
] | |
5ca3a3da95dfa01675de20c2a2689d7b0c4568a5 | b9faae037d6c2fb69406aa1f2e86f42d3a6225ce | /data/snippets/github.com/kkkmmu/useful_script/python/function.py | 07ead533d318ceefbec699c872dfd32f30dc60cf | [] | no_license | lovefcaaa/snk.dev-assistant | 9eaedd448ee2c3f532aa7acdbee1ff1d91f9c07f | 20ffcdddba19102348d75235f7d6d557c2386e69 | refs/heads/master | 2023-07-09T14:12:55.141283 | 2018-06-25T14:18:49 | 2018-06-25T14:18:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,842 | py | print(abs(-110))
print(max(1, 5, -10, 10))
print(int("123"))
print(int(1.24))
print(float("1.2345"))
print(str(1.23))
print(str(1000))
print(bool(100))
print(bool(-1000))
print(bool(0))
print(bool(None))
a = abs
print(a(-1234))
print(hex(1234))
def mabs(x):
if x >= 0:
return x
else:
return -x
print(mabs(-11111))
def nop():
pass
print(nop())
def mabs2(x):
if not isinstance(x, (int, float)):
raise TypeError("Bad operand type")
if x >= 0:
return x
else:
return -x
print(mabs2(-12343))
import math
def move(x, y, step, angle=0):
nx = x + step * math.cos(angle)
ny = y - step * math.sin(angle)
return nx, ny
print(move(10, 10, 1, 30))
z = move(16, 16, 2, 45)
print(z)
def power(x, n=2):
s = 1
while n > 0:
n = n-1
s = s*x
return s
print(power(2))
print(power(2, 4))
def defaultp(a, b, c=1, d=2):
print(a, b, c, d)
defaultp(1, 2)
defaultp(1, 2, d=10)
def calc(*numbers):
sum = 0
for n in numbers:
sum += n*n
return sum
print(calc(1,2,3,4,5))
print(calc(1,2))
print(calc(1))
print(calc())
nums = [4, 5, 5, 6, 8]
print(calc(*nums))
def person(name, age, **kv):
print("name:", name, "age:", age, "other:", kv)
person("Liwei", 30)
person("Liwei", 30, city="Beijing")
person("Liwei", 30, city="Beijing", gender="M")
extra = {"City": "Beijing", "job": "polan"}
person("Jack", 100, **extra)
def person2(name, age, *, city, job):
print(name, age, city, job)
#person2("jack", 24)
#person2("jack", 24, "Beijing", "polan")
person2("jack", 24, city="Beijin", job="polan")
def fact(n):
if n == 1:
return 1
return n * fact(n -1)
print(fact(10))
def f (x):
return x * x
r = map(f, [1,2,3,4,5,6,7,8,9,10])
print(r)
print(list(r))
print(list(map(str, [1,2,3,4,5,6,7,8,9])))
| [
"[email protected]"
] | |
a892c3b8705ffa0ffda05544d3ee3374853c0d4a | 65f548bbeb62d7f2476351bda7b723dab216ce08 | /train_scripts/trainScriptNN.py | aae161d89eeaafeb65bc1a94037985b9c6919291 | [] | no_license | StefanPushkov/TestDocker | f6148db20e2f50694528f88a837e4c517e5a8afe | aecdd0b5207e9f93950faa51535a888d62bb67c1 | refs/heads/master | 2022-11-21T08:44:00.607204 | 2020-07-20T10:46:39 | 2020-07-20T10:46:39 | 280,253,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,911 | py | import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from torch.utils import data as data_utils
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score
import torch.nn.functional as F
# Read data
df = pd.read_csv("./data/train.csv/train.csv")
df_test = pd.read_csv('./data/test.csv/test.csv')
print('-'*15, "Data loaded")
# Replace inf values with np.nan, then replace nan with 0
df.replace([np.inf, -np.inf], np.nan,inplace=True)
df = df.fillna(0)
df_test.replace([np.inf, -np.inf], np.nan,inplace=True)
df_test = df_test.fillna(0)
# Features
X = df.drop(['sample_id', 'y'], axis=1)
X_submission = df_test.drop(['sample_id'], axis=1)
# Labels
y = df['y']
# Features normalization
features_norm = StandardScaler()
X_std = features_norm.fit_transform(X)
X_submission_std = features_norm.fit_transform(X_submission)
# Split data in train/test
X_train, x_test, Y_train, y_test = train_test_split(X_std, y, test_size=0.2, random_state=42)
# To torch tensor: Train
X_train_tensor = torch.tensor(X_train, dtype=torch.float)
Y_train_tensor = torch.tensor(Y_train.values).flatten()
# Test
x_test_tensor = torch.tensor(x_test, dtype=torch.float)
y_test_tensor = torch.tensor(y_test.values).flatten()
# Create train dataloader
batch_size = 128
train_dataset = data_utils.TensorDataset(X_train_tensor, Y_train_tensor)
train_loader = data_utils.DataLoader(dataset = train_dataset, batch_size = batch_size, shuffle = True)
# Create eval dataloader
eval_dataset = data_utils.TensorDataset(x_test_tensor, y_test_tensor)
eval_loader = data_utils.DataLoader(dataset = eval_dataset, batch_size = batch_size, shuffle = True)
# Class must extend nn.Module (torch.nn.functional is already imported above as F)
class MyClassifier(nn.Module):
def __init__(self):
super(MyClassifier,self).__init__()
# Our network consists of 3 layers. 1 input, 1 hidden and 1 output layer
self.fc1 = nn.Linear(1612,200)
self.fc2 = nn.Linear(200,100)
self.layer_out = nn.Linear(100,1)
self.dropout = nn.Dropout()
self.bn0 = nn.BatchNorm1d(1612)
self.bn1 = nn.BatchNorm1d(200)
self.bn_out = nn.BatchNorm1d(100)
def forward(self,x):
# Batch normalization
x = self.bn0(x)
# This applies Linear transformation to input data with non-linear activation
x = F.relu(self.fc1(x))
# Dropout
x = self.dropout(x)
x = self.bn1(x)
x = F.relu(self.fc2(x))
x = self.dropout(x)
x = self.bn_out(x)
#This applies linear transformation to produce output data
x = self.layer_out(x)
return x
#This function takes an input and predicts the class, (0 or 1)
    def predict(self, x):
        with torch.no_grad():
            y_pred = self(x)  # call this module directly instead of relying on the global `model`
            y_pred_tag = torch.round(torch.sigmoid(y_pred))
            return y_pred_tag.double()
    def predict_proba(self, x):
        with torch.no_grad():
            y_pred = self(x)
            prob = torch.sigmoid(y_pred)
            return prob.double()
def train_model(model, optim, criterion, train_dl):
model.train()
total = 0
sum_loss = 0
for x, y in train_dl:
batch = y.shape[0]
output = model(x)
loss = criterion(output, y.unsqueeze(1))
optim.zero_grad()
loss.backward()
# Clip gradient
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
optim.step()
# Accumulate epoch loss
total += batch
sum_loss += batch*(loss.item())
# print("Batch loss: ", batch*(loss.item()))
return sum_loss/total
if __name__ == '__main__':
# Initialize the model
model = MyClassifier()
# Define loss criterion
criterion = nn.BCEWithLogitsLoss()
# Define the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
print(15*'-', 'Model started training')
#Number of epochs
epochs = 150
#List to store losses
train_losses = []
for i in range(epochs):
epoch_loss = train_model(model=model, optim=optimizer, criterion=criterion, train_dl=train_loader)
train_losses.append(epoch_loss)
if i % 10 == 0:
print("Epoch {0}, Loss {1}".format(i+1, epoch_loss))
auc_sc = roc_auc_score(y_test_tensor.long(), model.predict_proba(x_test_tensor))
print('-'*15, "AUC score Network = ", auc_sc)
prob_voting = model.predict_proba(x_test_tensor)
# Plotting ROC-AUC
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test_tensor.numpy(), prob_voting.numpy())
roc_auc = auc(false_positive_rate, true_positive_rate)
plt.figure(figsize=(10,10))
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate,true_positive_rate, color='red',label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],linestyle='--')
plt.axis('tight')
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.savefig('./aucNN.jpg')
# Convert numpy to torch tensor and make prediction
X_submission_tensor = torch.tensor(X_submission_std, dtype=torch.float)
a = model.predict_proba(X_submission_tensor).numpy()
# Create submission
submission = pd.DataFrame(df_test["sample_id"], index=None)
submission["y"] = a
submission.to_csv("./Network_sabmission.csv", sep=",", index=False)
print("Submission created (NN).")
| [
"[email protected]"
] | |
6634da207df73bf435953807b98cb2f9663aeea0 | 3d705ec48c94373817e5f61d3f839988910431e3 | /misc/tool/player/create_m3u8.py | 9a477d1647cb3a6a26eaa517614fc0aad8c674d8 | [] | no_license | namesuqi/zeus | 937d3a6849523ae931162cd02c5a09b7e37ebdd8 | 3445b59b29854b70f25da2950016f135aa2a5204 | refs/heads/master | 2022-07-24T14:42:28.600288 | 2018-03-29T08:03:09 | 2018-03-29T08:03:09 | 127,256,973 | 0 | 0 | null | 2022-07-07T22:57:57 | 2018-03-29T07:53:16 | Python | UTF-8 | Python | false | false | 475 | py | # coding=utf-8
"""
Continuously append m3u8 sub-segment URLs to a file.
A simulator used for testing the live-streaming player.
__author__ = 'zengyuetian'
"""
from random import randint
import time
if __name__ == "__main__":
for i in range(124):
time.sleep(randint(1, 4))
with open("zeus.m3u8", "a") as f:
url = "http://buddiestv.qiniudn.com/sUWPWh5odxh9vtorJ2tsEue__hQ=/lsmeSvostHYW3MuybV2NyHNYoRqS/seg{0}\n".format(str(i))
f.write(url)
f.flush() | [
"[email protected]"
] | |
86274f2d6be5ec0e61d7bdcfd74fd71805bf9bb9 | 0b3024565732948d36cb1ada431274db7fb2d6f1 | /PublicReference/config.py | 9559073ec5f12b9d486615af8f062953b627adf7 | [] | no_license | Luyaojun/DNFCalculating | adaa5b761e34985661d0c994ad52a63f092c6e62 | 02d701ec63ed083a8b7043229dfceb758e426cae | refs/heads/master | 2022-11-17T11:04:00.245554 | 2020-07-18T07:07:32 | 2020-07-18T07:07:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | import configparser
conf = configparser.ConfigParser()
conf.read('./ResourceFiles/set.ini', encoding='utf-8')
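# Illustrative sketch of the expected set.ini layout (inferred from the reads below,
# not copied from the original project):
#   [窗口显示]
#   value = 0
#   [怪物属性]
#   防御 = 443243
#   火抗 = 0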
# Window scaling mode
try:
窗口显示模式 = conf.getint('窗口显示', 'value')
except:
窗口显示模式 = 0
# Debug switch: skip equipment attribute calculation; weapon type is the first option in the character's weapon list
try:
调试开关 = conf.getint('调试开关', 'value')
except:
调试开关 = 0
# Export gear combinations and damage data to CSV
try:
输出数据 = conf.getint('输出数据', 'value')
except:
输出数据 = 0
# Controls whether the 夜语黑瞳 weapon is shown on the first page
try:
普雷武器显示 = 1 - conf.getint('夜语黑瞳', 'value')
except:
普雷武器显示 = 1
# Each weapon appears only once in the ranking
try:
武器排名 = conf.getint('武器排名', 'value')
except:
武器排名 = 0
# Monster attributes (defense and elemental resistances)
try:
防御输入 = conf.getint('怪物属性', '防御')
火抗输入 = conf.getint('怪物属性', '火抗')
冰抗输入 = conf.getint('怪物属性', '冰抗')
光抗输入 = conf.getint('怪物属性', '光抗')
暗抗输入 = conf.getint('怪物属性', '暗抗')
except:
防御输入 = 443243
火抗输入 = 0
冰抗输入 = 0
光抗输入 = 0
暗抗输入 = 0
# Weapon index
try:
武器序号 = conf.getint('武器序号', 'value')
except:
武器序号 = -1
# 天劫 set: defense/resistance reduction flags
try:
天劫减防 = conf.getint('天劫', '减防生效')
天劫减抗 = conf.getint('天劫', '减抗生效')
except:
天劫减防 = 0
天劫减抗 = 0
# 战术之王的御敌 set: extra additional (white) damage
try:
战术白字 = conf.getint('战术之王的御敌', '套装附加') / 100
except:
战术白字 = 0.40
# 天御之灾 set attribute
try:
天御套装 = conf.getint('天御之灾', '套装属性')
except:
天御套装 = 0
# 千蛛碎影 defense reduction
try:
千蛛减防 = conf.getint('千蛛碎影', '减防生效')
except:
千蛛减防 = 0 | [
"[email protected]"
] | |
23cc18ee51c728fe6203cb595154805a17389974 | f5d2a1459c81eb23a745bd63f41ef980c41ea0a4 | /ZG-PhaseFour/code/website/pcauto/PcautoComments.py | 3d29bfc7086cc8fd34cc48ac7d373bdd125bca66 | [] | no_license | ErBingBing/django-tonado-crawler | 6800bb0269e99e2454fb0a9079175ffe9d4d0a0b | db31b4cdf7ecc509f1a87aa325621943df825e98 | refs/heads/master | 2021-08-22T11:30:08.419583 | 2017-11-30T04:04:40 | 2017-11-30T04:04:40 | 112,562,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,027 | py | # encoding=utf-8
##############################################################################################
# @file:PcautoComments.py
# @author:Ninghz
# @date:2016/11/18
# @note: Fetches comments from the pcauto.com.cn (Pacific Auto) site
# @change log
# @author:yongjicao
# @date:2017/9/12
# @note: Changed comment storage to MySQL
##############################################################################################
import json
import datetime
import traceback
import math
from utility.regexutil import RegexUtility
from website.common.comments import SiteComments
from storage.cmtstorage import CMTStorage
from storage.newsstorage import NewsStorage
from utility.gettimeutil import TimeUtility
from log.spiderlog import Logger
##############################################################################################
# @class:PcautoComments
# @author:Ninghz
# @date:2016/11/18
# @note: Class that fetches comments for the pcauto.com.cn site; inherits from SiteComments
##############################################################################################
class PcautoComments(SiteComments):
COMMENTS_URL = 'http://cmt.pcauto.com.cn/action/comment/list_new_json.jsp?urlHandle=1&url=%s&pageNo=%d&pageSize=%d'
PAGE_SIZE = 50.0
STEP_1 = None
STEP_2 = 2
STEP_3 = 3
def __init__(self):
SiteComments.__init__(self)
self.r = RegexUtility()
##############################################################################################
# @functions:process
# @param: parameters passed in by the common module (target url, original url, current step, custom data)
# @return:Step1: URL of the first comment page
#         Step2: URLs of all comment pages
#         Step3: extracted comments and the creation time of the newest comment
# @author:Ninghz
# @date:2016/11/16
# @note:Step1: from the url passed in by the common module, build the url that returns the total comment count and hand it back to the common module
#       Step2: read the total comment count from the content passed in, build the comment-page urls and hand them back to the common module
#       Step3: extract the comments and the newest comment's creation time from the content passed in and store them
##############################################################################################
def process(self, params):
try:
if params.step is None:
                # Build the URL for the first page of comments
comments_url = PcautoComments.COMMENTS_URL % (params.originalurl, 1, PcautoComments.PAGE_SIZE)
                # Tell the download platform to fetch the first comment page for this URL
self.storeurl(comments_url, params.originalurl, PcautoComments.STEP_2)
            # Parse the first page of comments and queue up the URLs for every comment page
elif params.step == PcautoComments.STEP_2:
                # Parse the JSON payload containing the comments
comments = json.loads(params.content)
                # Total number of comments
comments_count = int(comments['total'])
NewsStorage.setcmtnum(params.originalurl, comments_count)
if comments_count == 0:
return
                # Incremental check: skip if we already stored at least this many comments
cmtnum = CMTStorage.getcount(params.originalurl, True)
if cmtnum >= comments_count:
return
page_num = int(math.ceil(float(comments_count - cmtnum) / self.PAGE_SIZE))
if page_num >= self.maxpages:
page_num = self.maxpages
                # Build a URL for each comment page and submit it to the download platform
for page in range(1, page_num + 1, 1):
commentUrl = PcautoComments.COMMENTS_URL % (params.originalurl, page, PcautoComments.PAGE_SIZE)
self.storeurl(commentUrl, params.originalurl, PcautoComments.STEP_3)
            # Parse the comment data
elif params.step == PcautoComments.STEP_3:
commentsinfo = json.loads(params.content)
comments = []
for comment in commentsinfo['data']:
updatetime = comment['createTime']
content = comment['content']
curtime = TimeUtility.getuniformtime(updatetime)
try:
nick = comment['nickName']
except:
nick = 'anonymous'
if not CMTStorage.exist(params.originalurl, content, curtime, nick):
CMTStorage.storecmt(params.originalurl, content, curtime, nick)
# if URLStorage.storeupdatetime(params.originalurl, updatetime):
# cmti = CommentInfo()
# cmti.content = comment['content']
# comments.append(cmti)
# if len(comments) > 0:
# self.commentstorage.store(params.originalurl, comments)
except Exception, e:
traceback.print_exc() | [
"[email protected]"
] | |
da6ebfdb2b8526dae263cda11c146cefa87e951a | 06e43389ade453e16d1ab08f6f29127a48ede422 | /sanp3.py | f3ef163a069faa011e7012056cb4ff3b7272d68d | [] | no_license | santhoshbabu4546/guvi1 | 0fd5b29c9bd7d26c2208e36cccca3c57ef58366f | 97a783c4ff8be92c9e0645ee605981177dc06dec | refs/heads/master | 2020-06-21T03:48:30.279600 | 2019-07-17T05:11:43 | 2019-07-17T05:11:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | f = input
n = f()[::-1]  # f is the built-in input; call it to read a line, then reverse the string
print(n)
| [
"[email protected]"
] | |
dc0cd93f35f2c1313c250efce57156a66fbf52bb | 4bf3aaf77c309a489100b98a8c03532632df152c | /Python/동빈북/10주차2/트리의지름.py | 5a204ed65235c2f7a796144aba287d2f73437648 | [] | no_license | murane/PS | 7fbfc54d962231949efc67f1a35c4b0119de0780 | e938c6c503aeac08bf65e1e66709172b0e5da6ef | refs/heads/master | 2023-05-06T22:51:54.105811 | 2021-05-30T03:34:53 | 2021-05-30T03:34:53 | 293,699,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | import sys
sys.setrecursionlimit(10**6)  # the recursive DFS below can be deep on a path-shaped tree
r=sys.stdin.readline
V=int(r())
tree=[dict() for _ in range(V+1)]
for _ in range(V):
line=list(map(int,r().split()))
target=line[0]
line=line[1:-1]
for i in range(len(line)//2):
tree[target][line[i*2]]=line[i*2+1]
tree[line[i*2]][target]=line[i*2+1]
leng=0
pole=0
visit=[False]*(V+1)
def dfs(V,Cur):
global leng
global pole
visit[V]=True
if leng<Cur:
leng=Cur
pole=V
for Node,W in tree[V].items():
if not visit[Node]:
visit[Node]=True
dfs(Node,Cur+W)
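# Tree diameter via double DFS: the farthest node from an arbitrary start (node 1)
# is one endpoint of the diameter; a second DFS from that endpoint gives its length.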
dfs(1,0)
leng=0
visit=[False]*(V+1)
dfs(pole,0)
print(leng)
| [
"[email protected]"
] | |
e386fe6a75e7df2fb336f91983882abcc80f5e28 | 4331b28f22a2efb12d462ae2a8270a9f666b0df1 | /.history/dvdstore/webapp/views_20190914143025.py | 52963ede814339e5240ae95de166024a88b2f934 | [] | no_license | ZiyaadLakay/csc312.group.project | ba772a905e0841b17478eae7e14e43d8b078a95d | 9cdd9068b5e24980c59a53595a5d513c2e738a5e | refs/heads/master | 2020-07-26T23:30:22.542450 | 2019-09-16T11:46:41 | 2019-09-16T11:46:41 | 200,703,160 | 0 | 0 | null | 2019-08-05T17:52:37 | 2019-08-05T17:52:37 | null | UTF-8 | Python | false | false | 9,050 | py | from django.shortcuts import render
from .models import DVD, Transaction, Customer
from django.core.paginator import EmptyPage,PageNotAnInteger, Paginator
from django.db.models import Q
from django.contrib.auth.models import User, auth
from django.shortcuts import render, redirect
from django.contrib import messages
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.decorators import login_required, permission_required
from .form import DocumentForm
import datetime
#This is the homepage for the User
def home(request):
dvds = DVD.objects.all() #imports dvds from database
query = request.GET.get("query")
gen = request.GET.get("gen")
if query:
dvds = DVD.objects.filter(Q(Title__icontains=query))#Search Function according to name
if not DVD.objects.filter(Q(Title__icontains=query)).exists():
messages.info(request,'No search results for : '+query)
elif gen:
dvds = DVD.objects.filter(Q(genre__icontains=gen))#Search Function according to name
    paginator = Paginator(dvds, 6) # Show 6 dvds per page
page = request.GET.get('page')
dvds = paginator.get_page(page)
genre = {'Action', 'Comedy', 'Drama', 'Family', 'Romance'}
    return render(request, 'home.html', {'dvds':dvds, 'genre':genre}) #renders the page with a single context dict
#This is the page for clerks
@login_required
def clerk(request):
dvds = DVD.objects.all() #imports dvds from database
trans = Transaction.objects.all() #imports dvds from database
users = User.objects.all() #imports dvds from database
customer = Customer.objects.all() #imports dvds from database
query = request.GET.get("query")
if query:
dvds = DVD.objects.filter(Q(Title__icontains=query)) #Search Function according to name
    paginator = Paginator(dvds, 6) # Show 6 dvds per page
page = request.GET.get('page')
dvds = paginator.get_page(page)
form=DocumentForm()
context_dict = { 'dvds':dvds ,'form': form, 'trans':trans, 'users':users, 'customer':customer}
return render(request, 'clerk.html',context_dict)
@login_required
def userstbl(request):
dvds = DVD.objects.all() #imports dvds from database
trans = Transaction.objects.all() #imports dvds from database
users = User.objects.all() #imports dvds from database
customer = Customer.objects.all() #imports dvds from database
query = request.GET.get("query")
if query:
users = User.objects.filter(Q(username__icontains=query)) #Search Function according to name
    paginator = Paginator(dvds, 6) # Show 6 dvds per page
page = request.GET.get('page')
dvds = paginator.get_page(page)
form=DocumentForm()
context_dict = { 'dvds':dvds ,'form': form, 'trans':trans, 'users':users, 'customer':customer}
return render(request, 'userstbl.html',context_dict)
@login_required
def transactions(request):
dvds = DVD.objects.all() #imports dvds from database
trans = Transaction.objects.all() #imports dvds from database
users = User.objects.all() #imports dvds from database
customer = Customer.objects.all() #imports dvds from database
query = request.GET.get("query")
if query:
trans = Transaction.objects.filter(Q(TransactionNumber__icontains=query)) #Search Function according to name
    paginator = Paginator(dvds, 6) # Show 6 dvds per page
page = request.GET.get('page')
dvds = paginator.get_page(page)
form=DocumentForm()
context_dict = { 'dvds':dvds ,'form': form, 'trans':trans, 'users':users, 'customer':customer}
return render(request, 'transactions.html',context_dict)
def register2(request):
if request.method == 'POST':
first_name= request.POST['first_name']
last_name= request.POST['last_name']
username= request.POST['username']
email= request.POST['email']
password1= first_name[0]+last_name
if User.objects.filter(username=username).exists():
messages.info(request, 'Username Taken')
return redirect('clerk')
elif User.objects.filter(email=email).exists():
            messages.info(request, 'Email Taken')
            return redirect('clerk')
user = User.objects.create_user(username=username, password=password1, email=email, first_name=first_name, last_name=last_name)
user.save()
messages.info(request, 'User Created')
return redirect('/clerk')
def model_form_upload(request):
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('/clerk')
def booking(request):
username= request.POST['username']
dvdID= request.POST['dvdID']
DVD.objects.filter(id=dvdID).update(BookingPickup=username)
return redirect('home')
def checkout(request):
dvdID= request.POST['dvdID']
numOfDays=request.POST['numDaysBooked']
dvdPrice=request.POST['dvdPrice']
users_ID=request.POST['user_ID']
MovieTitle=request.POST['MovieTitle']
payment=request.POST['payment']
bill=int(numOfDays)*int(dvdPrice)
DVD.objects.filter(id=dvdID).update(NumDaysBooked=numOfDays,InStock=False)
RentDate= datetime.date.today()
DueDate=RentDate+datetime.timedelta(days=int(numOfDays))
t = datetime.datetime.now().strftime("%H%M%S")
TransactionNumber=payment+str(RentDate)[0:4]+str(RentDate)[8:10]+t
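    # e.g. payment 'Cash' on 2019-09-14 at 14:30:25 gives TransactionNumber 'Cash201914143025'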
#Amount
trans = Transaction(users_ID=users_ID, TransactionNumber=TransactionNumber, RentDate=RentDate, DueDate=DueDate, MovieTitle=MovieTitle, Payment_Method=payment,Amount="R"+str(bill),dvdID=dvdID)
trans.save()
return redirect('/clerk')
def checkin(request):
dvdID= request.POST['dvdID']
DVD.objects.filter(id=dvdID).update(BookingPickup='None',InStock=True,NumDaysBooked=0)
return redirect('/clerk')
def deleteMovie(request):
dvdID= request.POST['dvdID']
DVD.objects.filter(id=dvdID).delete()
return redirect('/clerk')
def deleteTransaction(request):
transID= request.POST['transID']
Transaction.objects.filter(id=transID).delete()
return redirect('/transactions')
def deleteUser(request):
userID= request.POST['userID']
User.objects.filter(id=userID).delete()
return redirect('/userstbl')
def user_detail(request):
id = None
if request.user.is_authenticated:
id = request.user.id
detail1 = User.objects.filter( id = id )
detail2 = Customer.objects.filter( id = id )
return render(request, 'user_detail.html',{'detail1':detail1 , 'detail2' : detail2})
def registerCustomer(request):
if request.method == 'POST':
first_name= request.POST['first_name']
last_name= request.POST['last_name']
phone_number= request.POST['phone_number']
address= request.POST['address']
identification= request.POST['identification']
email= request.POST['email']
password1= request.POST['password1']
password2= request.POST['password2']
username= request.POST['username']
if password1 == password2 :
if Customer.objects.filter(username=username).exists():
messages.info(request, 'Username Taken')
return redirect('register.html')
elif Customer.objects.filter(email=email).exists():
messages.info(request, 'Email Taken')
return redirect('register.html')
user = Customer.objects.create_user(phone_number=phone_number, address=address,identification=identification,username=username, password=password1, email=email, first_name=first_name, last_name=last_name)
# customer = Customer.objects.create_user(phone_number=phone_number,identification=identification,address=address)
user.save()
# customer.save()
messages.info(request, 'User Created')
# messages.info(request, 'Customer Created')
return redirect('login.html')
else:
print('password does not match')
messages.info(request, 'Password does not match')
return redirect('register.html')
return redirect('login.html')
else:
return render(request, 'register.html')
def updateCustomer(request):
if request.method == 'POST':
first_name= request.POST['first_name']
last_name= request.POST['last_name']
phone_number= request.POST['phone_number']
address= request.POST['address']
identification= request.POST['identification']
email= request.POST['email']
username= request.POST['username']
        # the customer id is expected in the POST data, as in deleteUser above
        userID = request.POST['userID']
        Customer.objects.filter(id=userID).update(phone_number=phone_number, address=address,identification=identification,username=username, email=email, first_name=first_name, last_name=last_name)
        # customer = Customer.objects.create_user(phone_number=phone_number,identification=identification,address=address)
return redirect('home')
| [
"[email protected]"
] | |
381983aa73140a1fb84cd80fcf777075c6e1922f | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-1/vse-naloge-brez-testov/DN7-M-175.py | 9f07005e81b8ff32835a51038b44d55f2690253c | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,198 | py | # To funkcijo prijazno podarjam vsem, ki bodo programirali v eni vrstici. :)
# How to use it is explained in the instructions. If you do not need it, just ignore it.
import collections
def vsa_polja(s, v):
"""
    Generate all coordinates (x, y) for a board with the given width and height
    Args:
        s (int): width
        v (int): height
    Returns:
        generator of field coordinate pairs
"""
return ((x, y) for x in range(s) for y in range(v))
########################
# For grade 6
def sosedov(x, y, mine):
"""
    Return the number of neighbours of the field at coordinates `(x, y)` that
    contain a mine. The field itself does not count.
    Args:
        x (int): x coordinate
        y (int): y coordinate
        mine (set of tuple of int): coordinates of the mines
    Returns:
        int: number of such neighbours
"""
def sosedov(x, y, mine):
k = 0
for x1, y1, in mine:
if abs(x - x1) <= 1 and abs(y - y1) <= 1 and (x, y) != (x1, y1):
k += 1
return k
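# Example with a made-up minefield: two of the neighbours of (1, 1) are mined.
#   sosedov(1, 1, {(0, 0), (2, 2)})  ->  2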
def najvec_sosedov(mine, s, v):
"""
    Return the coordinates of the field with the most neighbouring mines
    Args:
        mine (set of (int, int)): coordinates of the mines
        s (int): board width
        v (int): board height
    Returns:
        tuple of int: coordinates of the field
"""
def najvec_sosedov(mine, s, v):
sl = collections.defaultdict(list)
for x, y in vsa_polja(s, v):
sl[sosedov(x, y, mine)].append((x, y))
return sl[max(sl)][0]
def brez_sosedov(mine, s, v):
"""
    Return the set of coordinates of fields that have no mines on any of their
    neighbouring fields. The field itself may contain a mine.
    Args:
        mine (set of tuple of int): coordinates of the mines
        s (int): board width
        v (int): board height
    Returns:
        set of tuple: fields with no mines on neighbouring fields
"""
def brez_sosedov(mine, s, v):
sl = collections.defaultdict(set)
for x, y in vsa_polja(s, v):
sl[sosedov(x, y, mine)].add((x, y))
return sl[0]
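# Example (made-up minefield): on a 2x2 board with a mine at (0, 0), only the
# mined field itself has no mined neighbours.
#   brez_sosedov({(0, 0)}, 2, 2)  ->  {(0, 0)}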
def po_sosedih(mine, s, v):
"""
    Return a dictionary whose keys are the possible numbers of neighbouring
    fields with mines (that is, the numbers 0 to 8) and whose values are sets
    of coordinates of the fields with that many such neighbours.
    Args:
        mine (set of tuple of int): coordinates of the mines
        s (int): board width
        v (int): board height
    Returns:
        dict: (see above)
"""
def po_sosedih(mine, s, v):
sl = {}
for i in range(9):
sl[i] = set()
for x, y in vsa_polja(s, v):
sl[sosedov(x, y, mine)].add((x, y))
return sl
########################
# For grade 7
def dolzina_poti(pot):
"""
    Return the length of the given path, including the intermediate fields.
    Args:
        pot (list of tuple): list of field coordinates
    Returns:
        int: length of the path
"""
def dolzina_poti(pot):
k = 0
for (x0, y0), (x1, y1) in zip(pot, pot[1:]):
if x0 != x1:
k += abs(x1 - x0)
if y0 != y1:
k += abs(y1 - y0)
return k
def varen_premik(x0, y0, x1, y1, mine):
"""
    Return `True` if the move from (x0, y0) to (x1, y1) is safe, `False` if it is not.
    Args:
        x0 (int): x coordinate of the starting field
        y0 (int): y coordinate of the starting field
        x1 (int): x coordinate of the target field
        y1 (int): y coordinate of the target field
        mine (set of tuple of int): coordinates of the mines
    Returns:
        bool: `True` if the move is safe, `False` if it is not.
"""
def varen_premik(x0, y0, x1, y1, mine):
if (x0, y0) in mine or (x1, y1) in mine:
return False
if x0 != x1:
if x1 < x0:
while x1 != x0:
x0 -= 1
if(x0, y0) in mine:
return False
else:
while x1 != x0:
x0 += 1
if(x0, y0) in mine:
return False
else:
if y1 < y0:
while y1 != y0:
y0 -= 1
if(x0, y0) in mine:
return False
else:
while y1 != y0:
y0 += 1
if(x0, y0) in mine:
return False
return True
def varna_pot(pot, mine):
"""
    Return `True` if the given path is safe, `False` if it is not.
    Args:
        pot (list of tuple of int): coordinates of the waypoints on the path (without intermediate points)
        mine (set of tuple of int): coordinates of the mines
    Returns:
        bool: `True` if the path is safe, `False` if it is not.
"""
def varna_pot(pot, mine):
if len(pot) == 1 and pot[0] in mine:
return False
for pot1, pot2 in zip(pot, pot[1:]):
if not varen_premik(pot1[0], pot1[1], pot2[0], pot2[1], mine):
return False
return True
########################
# For grade 8
def polje_v_mine(polje):
"""
    Return the coordinates of the mines on the given board.
    The string polje describes the board so that its horizontal "rows" are
    separated by spaces. Free fields are marked with `.`, mines with `X`.
    Args:
        polje (str): the board
    Returns:
        mine (set of tuple of int): coordinates of the mines
        s (int): board width
        v (int): board height.
"""
def polje_v_mine(polje):
kor = set()
v = -1
for pole in polje.split():
v += 1
for s in range(len(pole)):
if pole[s] == "X":
kor.add((s, v))
return kor, s + 1, v + 1
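# Example (made-up board, two rows of three fields):
#   polje_v_mine("X.. ..X")  ->  ({(0, 0), (2, 1)}, 3, 2)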
########################
# For grade 9
#
# All of the functions for grades 6 and 7 must be written in a single line.
def sosedov(x, y, mine):
return sum([1 for x1, y1 in mine if abs(x - x1) <= 1 and abs(y - y1) <= 1 and (x, y) != (x1, y1)])
def dolzina_poti(pot):
return sum([abs(x1 - x0) + abs(y1 - y0) for (x0, y0), (x1, y1) in zip(pot, pot[1:])])
def brez_sosedov(mine, s, v):
return {a for a, b in {(x, y):sosedov(x, y, mine) for x, y in vsa_polja(s, v)}.items() if b == 0}
def varna_pot(pot, mine):
    return all([not (len(pot) == 1 and pot[0] in mine)] +
               [varen_premik(pot1[0], pot1[1], pot2[0], pot2[1], mine)
                for pot1, pot2 in zip(pot, pot[1:])])
def varen_premik(x0, y0, x1, y1, mine):
return all([not((x0, y0) in mine or (x1, y1) in mine)] + \
[not((x, y0) in mine) for x in range(x0, x1) if x0 < x1] + \
[not((x, y0) in mine) for x in range(x0, x1, -1) if x0 > x1] + \
[not ((x0, y) in mine) for y in range(y0, y1) if y0 < y1] + \
[not((x0, y) in mine) for y in range(y0, y1, -1) if y0 > y1])
def najvec_sosedov(mine, s, v):
return max({sosedov(x, y, mine):(x, y) for x, y in vsa_polja(s, v)}.items())[1]
def po_sosedih(mine, s, v):
return {a:{(x, y)for x, y in vsa_polja(s, v) if a == sosedov(x, y, mine)} for a, b in {i:{1, 2, 3} for i in range(9)}.items()}
########################
# For grade 10
def preberi_pot(ukazi):
"""
    For the given list of commands (see the assignment instructions), return the path.
    Args:
        ukazi (str): the commands, written line by line
    Returns:
        list of tuple of int: the path
"""
def preberi_pot(ukazi):
x = y = nasoka = 0
pot = [(x, y)]
for ukaz in ukazi.split():
if ukaz.isalpha():
if ukaz == "DESNO":
nasoka += 1
else:
nasoka -= 1
if nasoka < -3 or nasoka > 3:
nasoka = 0
else:
if nasoka == 1 or nasoka == -3: #360
x += int(ukaz)
elif nasoka == 2 or nasoka == -2: #270
y += int(ukaz)
elif nasoka == 3 or nasoka == -1: #180
x -= int(ukaz)
else:
y -= int(ukaz)
pot.append((x, y))
return pot
def zapisi_pot(pot):
"""
    For the given path, return the list of commands (see the assignment instructions).
    Args:
        pot (list of tuple of int): the path
    Returns:
        str: the commands, written line by line
"""
def zapisi_pot(pot):
potmoj = []
ukazi = ""
for (x0, y0), (x1, y1) in zip(pot, pot[1:]):
if (x0, y0) not in potmoj:
potmoj.append((x0, y0))
if (x1, y1) not in potmoj:
potmoj.append((x1, y1))
k = abs(x1 - x0) + abs(y1 - y0)
while preberi_pot(ukazi + str(k)) != potmoj:
ukazi += " LEVO "
ukazi += str(k)
return ukazi
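# A quick usage sketch (hypothetical command string, using only the functions
# above): "DESNO"/"LEVO" turn right/left, numbers move in the current
# direction, and the starting direction decreases y, so:
#   preberi_pot("DESNO 3 LEVO 2")          ->  [(0, 0), (3, 0), (3, -2)]
#   zapisi_pot([(0, 0), (3, 0), (3, -2)])  ->  a command string that
#                                               preberi_pot maps back to the same path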
| [
"[email protected]"
] | |
14d337cc0ae57c9c93b5155c34ee19114e1317c7 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stubs/openpyxl/openpyxl/drawing/connector.pyi | 3b3e8a479f72006e2689a5b70ac025c01e2a488f | [
"Apache-2.0",
"MIT"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 1,867 | pyi | from typing import Any
from openpyxl.descriptors.serialisable import Serialisable
class Connection(Serialisable):
id: Any
idx: Any
def __init__(self, id: Any | None = ..., idx: Any | None = ...) -> None: ...
class ConnectorLocking(Serialisable):
extLst: Any
def __init__(self, extLst: Any | None = ...) -> None: ...
class NonVisualConnectorProperties(Serialisable):
cxnSpLocks: Any
stCxn: Any
endCxn: Any
extLst: Any
def __init__(
self, cxnSpLocks: Any | None = ..., stCxn: Any | None = ..., endCxn: Any | None = ..., extLst: Any | None = ...
) -> None: ...
class ConnectorNonVisual(Serialisable):
cNvPr: Any
cNvCxnSpPr: Any
__elements__: Any
def __init__(self, cNvPr: Any | None = ..., cNvCxnSpPr: Any | None = ...) -> None: ...
class ConnectorShape(Serialisable):
tagname: str
nvCxnSpPr: Any
spPr: Any
style: Any
macro: Any
fPublished: Any
def __init__(
self,
nvCxnSpPr: Any | None = ...,
spPr: Any | None = ...,
style: Any | None = ...,
macro: Any | None = ...,
fPublished: Any | None = ...,
) -> None: ...
class ShapeMeta(Serialisable):
tagname: str
cNvPr: Any
cNvSpPr: Any
def __init__(self, cNvPr: Any | None = ..., cNvSpPr: Any | None = ...) -> None: ...
class Shape(Serialisable):
macro: Any
textlink: Any
fPublished: Any
fLocksText: Any
nvSpPr: Any
meta: Any
spPr: Any
graphicalProperties: Any
style: Any
txBody: Any
def __init__(
self,
macro: Any | None = ...,
textlink: Any | None = ...,
fPublished: Any | None = ...,
fLocksText: Any | None = ...,
nvSpPr: Any | None = ...,
spPr: Any | None = ...,
style: Any | None = ...,
txBody: Any | None = ...,
) -> None: ...
| [
"[email protected]"
] | |
51773bb0b5cf61f1eb2f13888232b12c252e6fbe | f121695e2dff353607fa47fb42482470e03bbf8a | /capitulo_06-Dicionarios/user.py | 27f9b3f93f445d4a391344208f4b9726d72af033 | [] | no_license | ranog/python_work | 76cbcf784c86fae4482be5383223e4b0a34f4130 | 47c442a90dcf32d5aef70858693a772a3c76a7ac | refs/heads/master | 2022-12-22T11:02:26.482059 | 2021-04-17T01:12:22 | 2021-04-17T01:12:22 | 233,634,221 | 2 | 1 | null | 2022-12-08T07:38:43 | 2020-01-13T15:58:46 | Python | UTF-8 | Python | false | false | 2,642 | py | #! /usr/bin/env python3
"""
NAME
    user.py - Looping through all the key-value pairs with a for loop
SYNOPSIS
    chmod +x user.py
    ./user.py
    Key: username
    Value: efermi
    Key: first
    Value: enrico
    Key: last
    Value: fermi
    - 6.12 - Extensões:
    Username: efermi
    Name: Erico Fermi
    Username: aeinstein
    Name: Albert Einstein
    Username: mcurie
    Name: Marie Curie
DESCRIPTION
    To write a for loop over a dictionary, we must create names for the two
    variables that will hold the key and the value of each key-value pair.
    You can choose any names you like for these two variables. The code would
    also work fine with abbreviated variable names, like this: for k, v in
    user_0.items(). The second half of the for statement includes the name of
    the dictionary followed by the items() method, which returns a list of
    key-value pairs. The for loop then stores each of these pairs in the two
    variables given. In the example above we use the variables to print each
    key, followed by its associated value. The "\n" in the first print
    statement makes sure a blank line is inserted before each key-value pair
    in the output.
    Python does not care about the order in which the key-value pairs are
    stored; it only records the connection between each individual key and
    its value.
    6.12 – Extensions: We are now working with examples complex enough to be
    extended in several ways. Take one of the example programs from this
    chapter and extend it by adding new keys and values, changing the context
    of the program, or improving the formatting of the output.
    ----------------------------------------------------------------------
HISTORY
    20202410: João Paulo, October 2020.
        - Looping through all the key-value pairs with a for loop
          (pp. 138-139).
    20202810: João Paulo, October 2020.
        - TRY IT YOURSELF 6.12 - Extensions (p. 150).
"""
user_0 = {'username' : 'efermi', 'first' : 'enrico', 'last' : 'fermi',}
for key, value in user_0.items():
print("\nKey: " + key)
print("Value: " + value)
print("\n- 6.12 - Extensões:")
users = {
'efermi' : {'first' : 'erico', 'last' : 'fermi',},
'aeinstein' : {'first' : 'albert', 'last': 'einstein',},
'mcurie': {'first': 'marie', 'last': 'curie',},}
for key, value in users.items():
print("\nUsername: " + key)
full_name = value['first'] + " " + value['last']
print("Name: " + full_name.title())
| [
"[email protected]"
] | |
7a14e8464e65364030788781634321eba09f3d7f | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /examples/v1/synthetics/CreateSyntheticsAPITest.py | 84904e1d7992fcae134d9955d3c134e4da54febc | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 4,304 | py | """
Create an API test returns "OK - Returns the created test details." response
"""
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v1.api.synthetics_api import SyntheticsApi
from datadog_api_client.v1.model.synthetics_api_test import SyntheticsAPITest
from datadog_api_client.v1.model.synthetics_api_test_config import SyntheticsAPITestConfig
from datadog_api_client.v1.model.synthetics_api_test_type import SyntheticsAPITestType
from datadog_api_client.v1.model.synthetics_assertion_operator import SyntheticsAssertionOperator
from datadog_api_client.v1.model.synthetics_assertion_target import SyntheticsAssertionTarget
from datadog_api_client.v1.model.synthetics_assertion_type import SyntheticsAssertionType
from datadog_api_client.v1.model.synthetics_browser_test_rum_settings import SyntheticsBrowserTestRumSettings
from datadog_api_client.v1.model.synthetics_device_id import SyntheticsDeviceID
from datadog_api_client.v1.model.synthetics_restricted_roles import SyntheticsRestrictedRoles
from datadog_api_client.v1.model.synthetics_test_ci_options import SyntheticsTestCiOptions
from datadog_api_client.v1.model.synthetics_test_details_sub_type import SyntheticsTestDetailsSubType
from datadog_api_client.v1.model.synthetics_test_execution_rule import SyntheticsTestExecutionRule
from datadog_api_client.v1.model.synthetics_test_options import SyntheticsTestOptions
from datadog_api_client.v1.model.synthetics_test_options_http_version import SyntheticsTestOptionsHTTPVersion
from datadog_api_client.v1.model.synthetics_test_options_monitor_options import SyntheticsTestOptionsMonitorOptions
from datadog_api_client.v1.model.synthetics_test_options_retry import SyntheticsTestOptionsRetry
from datadog_api_client.v1.model.synthetics_test_options_scheduling import SyntheticsTestOptionsScheduling
from datadog_api_client.v1.model.synthetics_test_options_scheduling_timeframe import (
SyntheticsTestOptionsSchedulingTimeframe,
)
from datadog_api_client.v1.model.synthetics_test_pause_status import SyntheticsTestPauseStatus
from datadog_api_client.v1.model.synthetics_test_request import SyntheticsTestRequest
body = SyntheticsAPITest(
config=SyntheticsAPITestConfig(
assertions=[
SyntheticsAssertionTarget(
operator=SyntheticsAssertionOperator.LESS_THAN,
target=1000,
type=SyntheticsAssertionType.RESPONSE_TIME,
),
],
request=SyntheticsTestRequest(
method="GET",
url="https://example.com",
),
),
locations=[
"aws:eu-west-3",
],
message="Notification message",
name="Example test name",
options=SyntheticsTestOptions(
ci=SyntheticsTestCiOptions(
execution_rule=SyntheticsTestExecutionRule.BLOCKING,
),
device_ids=[
SyntheticsDeviceID.LAPTOP_LARGE,
],
http_version=SyntheticsTestOptionsHTTPVersion.HTTP1,
monitor_options=SyntheticsTestOptionsMonitorOptions(),
restricted_roles=SyntheticsRestrictedRoles(
[
"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
]
),
retry=SyntheticsTestOptionsRetry(),
rum_settings=SyntheticsBrowserTestRumSettings(
application_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
client_token_id=12345,
is_enabled=True,
),
scheduling=SyntheticsTestOptionsScheduling(
timeframes=[
SyntheticsTestOptionsSchedulingTimeframe(
day=1,
_from="07:00",
to="16:00",
),
SyntheticsTestOptionsSchedulingTimeframe(
day=3,
_from="07:00",
to="16:00",
),
],
timezone="America/New_York",
),
),
status=SyntheticsTestPauseStatus.LIVE,
subtype=SyntheticsTestDetailsSubType.HTTP,
tags=[
"env:production",
],
type=SyntheticsAPITestType.API,
)
configuration = Configuration()
with ApiClient(configuration) as api_client:
api_instance = SyntheticsApi(api_client)
response = api_instance.create_synthetics_api_test(body=body)
print(response)
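# Note: Configuration() is typically driven by environment variables such as
# DD_API_KEY, DD_APP_KEY and DD_SITE; set them before running this example.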
| [
"[email protected]"
] | |
50942d27099e308aa4147588ac09f780f7856048 | f9308d5a8efe2dbb48e9cc87cd06405b60a9dc7b | /samples/python/apidocs/ee_featurecollection_getstring.py | 8f9996c332c70c0c6ba24aec13ae4de989df6de8 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | google/earthengine-community | 4e054b421f66f03507d58668084aee981062fc24 | ce931040c518860f8788b4888c0acfdebd2952fc | refs/heads/master | 2023-09-01T14:47:54.812703 | 2023-08-31T23:01:00 | 2023-08-31T23:01:39 | 200,732,820 | 428 | 552 | Apache-2.0 | 2023-09-13T21:46:51 | 2019-08-05T21:42:11 | Jupyter Notebook | UTF-8 | Python | false | false | 1,007 | py | # Copyright 2023 The Google Earth Engine Community Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START earthengine__apidocs__ee_featurecollection_getstring]
# A FeatureCollection with a string property value.
fc = ee.FeatureCollection([]).set('string_property', 'Abies magnifica')
# Fetch the string property value as an ee.String object.
print('String property value as ee.String:',
fc.getString('string_property').getInfo())
# [END earthengine__apidocs__ee_featurecollection_getstring]
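# Note: as with the other apidocs samples, this snippet assumes the Earth Engine
# client library has already been imported and initialized, e.g.:
#   import ee
#   ee.Initialize()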
| [
"[email protected]"
] | |
548439ff669f3f0d005ab4790ec8a3cb6a20f164 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_officiates.py | 4b9a0f7403716650f0c91ed440000739919939b4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
#class header
class _OFFICIATES():
def __init__(self,):
self.name = "OFFICIATES"
		self.definitions = ['officiate']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['officiate']
| [
"[email protected]"
] | |
8aed7256c3bb58a3dd4968ada6a007943f82ab89 | 3034cb06289f747066571c4ab54ca81996c22319 | /module_utils/RubrikLib/rubrik_lib/models/download_file_job_config.py | 54fb2f3618d7eb072adffba24a2445ad05a26500 | [] | no_license | tarunactivity/ansible-rubrik | b2f644805f13a553bd0635e6ddc230257d125ef7 | 5d978c23902fd32d92cc90c75e48e5fe2209f8e0 | refs/heads/master | 2023-04-29T04:25:26.834701 | 2023-04-20T21:58:47 | 2023-04-20T21:58:47 | 116,251,368 | 0 | 0 | null | 2018-01-04T11:18:38 | 2018-01-04T11:18:37 | null | UTF-8 | Python | false | false | 684 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DownloadFileJobConfig(Model):
"""DownloadFileJobConfig.
:param path: Absolute file path
:type path: str
"""
_validation = {
'path': {'required': True},
}
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
}
def __init__(self, path):
self.path = path
| [
"[email protected]"
] | |
af10e15a9c827ae8f34a1e8261c1ff400154fbab | 6d13de0d1ca89badfb76c677ffa8d7e2829677cb | /beaconWeb/apps/beacon/migrations/0076_auto__add_field_contactstatus_phone_number.py | 92b3a45c1c00040898605af926be5f2baa75b57c | [] | no_license | jasjitsingh85/beaconweb | 08a2b97346aea6db87dd19567c39a0d99f383ae8 | 269c6683f759fd7e75d13ea9eec8ad63ee24df53 | refs/heads/master | 2021-01-13T03:43:09.308401 | 2016-12-24T16:12:15 | 2016-12-24T16:12:15 | 77,268,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,940 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ContactStatus.phone_number'
db.add_column(u'beacon_contactstatus', 'phone_number',
self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ContactStatus.phone_number'
db.delete_column(u'beacon_contactstatus', 'phone_number')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'beacon.beacon': {
'Meta': {'object_name': 'Beacon'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cancelled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_hotspots'", 'to': u"orm['auth.User']"}),
'custom_deal_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'facebook_place_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isActivated': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'beacon.beaconfollow': {
'Meta': {'object_name': 'BeaconFollow'},
'beacon': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'beacon_follows'", 'to': "orm['beacon.Beacon']"}),
'checked_in_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'check_ins'", 'null': 'True', 'to': u"orm['auth.User']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.Contact']", 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sent_invites'", 'null': 'True', 'to': u"orm['auth.User']"}),
'saw_invite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'I'", 'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'received_invites'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'beacon.contact': {
'Meta': {'object_name': 'Contact'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'normalized_phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contacts'", 'to': u"orm['auth.User']"})
},
'beacon.contactgroup': {
'Meta': {'object_name': 'ContactGroup'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contact_groups'", 'to': u"orm['auth.User']"})
},
'beacon.contactstatus': {
'Meta': {'object_name': 'ContactStatus'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.Contact']", 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deal_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.DealStatus']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'installed_app': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'link_clicked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'sms_response': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '1', 'null': 'True', 'blank': 'True'})
},
'beacon.contentoption': {
'Meta': {'object_name': 'ContentOption'},
'content_option': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_location': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'beacon.deal': {
'Meta': {'object_name': 'Deal'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'additional_info': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'bonus_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'bonus_description_short': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'bonus_invite_requirement': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'deal_description': ('django.db.models.fields.TextField', [], {}),
'deal_description_short': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'deal_type': ('django.db.models.fields.CharField', [], {'default': "'DT'", 'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_app_payment': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'invite_description': ('django.db.models.fields.TextField', [], {}),
'invite_prompt': ('django.db.models.fields.TextField', [], {}),
'invite_requirement': ('django.db.models.fields.IntegerField', [], {}),
'item_cost': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'item_market_price': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'item_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'item_price': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'notification_text': ('django.db.models.fields.TextField', [], {}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.DealPlace']"}),
'reward_eligibility': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'beacon.dealhours': {
'Meta': {'object_name': 'DealHours'},
'days_active': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'deal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hours'", 'to': "orm['beacon.Deal']"}),
'end': ('django.db.models.fields.FloatField', [], {}),
'event_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'open_hours': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start': ('django.db.models.fields.FloatField', [], {})
},
'beacon.dealplace': {
'Meta': {'object_name': 'DealPlace'},
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'foursquare_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'street_address': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'yelp_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'yelp_rating_image_url': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'yelp_review_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'beacon.dealstatus': {
'Meta': {'object_name': 'DealStatus'},
'beacon': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'deal_statuses'", 'to': "orm['beacon.Beacon']"}),
'bonus_status': ('django.db.models.fields.CharField', [], {'default': "'L'", 'max_length': '10'}),
'checked_in_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deal_check_ins'", 'null': 'True', 'to': u"orm['auth.User']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deal_statuses'", 'null': 'True', 'to': "orm['beacon.Contact']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'deal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'deal_statuses'", 'to': "orm['beacon.Deal']"}),
'deal_status': ('django.db.models.fields.CharField', [], {'default': "'L'", 'max_length': '10'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'feedback': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.Feedback']", 'null': 'True', 'blank': 'True'}),
'hours': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'deal_statuses'", 'to': "orm['beacon.DealHours']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'invited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deal_sent_invites'", 'null': 'True', 'to': u"orm['auth.User']"}),
'isSubmitted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'payment_authorization': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'saw_invite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'default': "'I'", 'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deal_statuses'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'beacon.feedback': {
'Meta': {'object_name': 'Feedback'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'redemption_issue': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'beacon.groupmember': {
'Meta': {'object_name': 'GroupMember'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.Contact']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['beacon.ContactGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'beacon.image': {
'Meta': {'object_name': 'Image'},
'beacon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.Beacon']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_key': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'beacon.location': {
'Meta': {'object_name': 'Location'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'locations'", 'to': u"orm['auth.User']"})
},
'beacon.message': {
'Meta': {'object_name': 'Message'},
'avatar_url': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'beacon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.Beacon']"}),
'chat_type': ('django.db.models.fields.CharField', [], {'default': "'UM'", 'max_length': '10'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.Contact']", 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.Image']", 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'beacon.payment': {
'Meta': {'object_name': 'Payment'},
'date_submitted': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'deal_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.DealStatus']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_submitted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'payment_authorization': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'beacon.profile': {
'Meta': {'object_name': 'Profile'},
'activated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'activation_code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'deal_place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.DealPlace']", 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_venmo_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'normalized_phone': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'user_type': ('django.db.models.fields.CharField', [], {'default': "'HT'", 'max_length': '2'}),
'venmo_auth_token': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'beacon.recommendation': {
'Meta': {'object_name': 'Recommendation'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'foursquare_venue_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message_text': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'recommendations'", 'to': u"orm['auth.User']"})
},
'beacon.regionstate': {
'Meta': {'object_name': 'RegionState'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'region_states'", 'to': "orm['beacon.DealPlace']"}),
'region_state': ('django.db.models.fields.CharField', [], {'default': "'Enter'", 'max_length': '20'}),
'region_type': ('django.db.models.fields.CharField', [], {'default': "'IBeacon'", 'max_length': '20'}),
'showed_notification': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'region_states'", 'to': u"orm['auth.User']"})
},
'beacon.rewarditem': {
'Meta': {'object_name': 'RewardItem'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isRedeemed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reward_type': ('django.db.models.fields.CharField', [], {'default': "'DK'", 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'beacon.rewards': {
'Meta': {'object_name': 'Rewards'},
'deal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beacon.Deal']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isRedeemed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reward_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'reward_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['beacon'] | [
"[email protected]"
] | |
5a610f06fbc3cf603a2ed76efec99e61edfc5378 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/350/78405/submittedfiles/testes.py | c0375b269d872716a49804d9bdd8ad65b628d857 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | # -*- coding: utf-8 -*-
# START HERE BELOW
c = int(input('Digite o valor de c: '))
n = c * 9
print(n)
| [
"[email protected]"
] | |
715dc05ef8ff9902ec23f1039df3e9c7fa5bbe74 | a9ca484b422c802f02684ad64694212a7374a180 | /devel/lib/turtlebot3_example/turtlebot3_server | e8c5800d6414be4bda9b9bbc6d47d9b985fbad01 | [] | no_license | akash1306/mate_unity | 791cd5fadc1fae10e01e6f577fef5c43098eb766 | e947904f37cad6c814c9e5dfea0017a3d02a5b31 | refs/heads/master | 2022-12-10T11:22:59.709816 | 2020-08-30T10:29:34 | 2020-08-30T10:29:34 | 290,722,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/script.py.in
# creates a relay to a python script source file, acting as that file.
# The purpose is that of a symlink
python_script = '/home/kajal/mate_unity/src/turtlebot3/turtlebot3_example/nodes/turtlebot3_server'
with open(python_script, 'r') as fh:
context = {
'__builtins__': __builtins__,
'__doc__': None,
'__file__': python_script,
'__name__': __name__,
'__package__': None,
}
exec(compile(fh.read(), python_script, 'exec'), context)
| [
"[email protected]"
] | ||
10051e7b6695576ce02e1731f8384c452f3e9597 | 08583f5a46dab2455ef707a91a342e6a30f62e8a | /advanced-web-scraping-2/14.py | d97c5ac81eb880454b2680bb8a8a6d3656c18d5e | [] | no_license | mahdi-asadzadeh/python-webscraping-simple-projects | 924cfefcf1e88698bd76e09e2824da28a9460cb0 | 24f629f48921e7d047f5b5c0803e4d9b3ec31c86 | refs/heads/main | 2023-06-20T08:58:46.730938 | 2021-07-22T11:59:56 | 2021-07-22T11:59:56 | 388,445,419 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | import requests
url = 'http://www.webscrapingfordatascience.com/referercheck/secret.php'
my_headers = {
'Referer': 'http://www.webscrapingfordatascience.com/referercheck/'
}
r = requests.get(url, headers=my_headers)
print(r.text)
print(r.headers)
print(r.request.headers) | [
"[email protected]"
] | |
b8dc96f868b729cc7b8462228867ea99297e6c79 | 5f106b652fb2def72a5dac0f828f2ddb43b00b61 | /datashape/run_test.py | f6e1f97d0948812727405fcfb69088d542dd1542 | [
"BSD-3-Clause"
] | permissive | honnibal/anaconda-recipes | 9631cf038fa1817279075159da7361d0e869f7e7 | 5b6aa944763019afd767aff07939051c538fd15d | refs/heads/master | 2021-01-18T19:30:46.468145 | 2017-04-01T10:12:19 | 2017-04-01T10:12:19 | 86,897,632 | 3 | 0 | null | 2017-04-01T08:26:10 | 2017-04-01T08:26:10 | null | UTF-8 | Python | false | false | 77 | py | import datashape
print('datashape.__version__: %s' % datashape.__version__)
| [
"[email protected]"
] | |
608b91ef49bdcf91ae244f0073ec6a7bb4b05a22 | 87163acf1614292be250754f28114f89013f73a3 | /HackerRank/Problem Solving/Implementation/Easy/Cut the sticks.py | 400aa182819db47b8f08baf51c32151e0dddc50c | [] | no_license | khush-01/Python-codes | 742a9d9966d2ceb3ad2e7c78e34ef88e55df955a | da3cae8df0aafe763399066eefc9b786538fdb35 | refs/heads/main | 2023-03-20T04:37:14.020134 | 2021-03-12T04:56:30 | 2021-03-12T04:56:30 | 346,941,048 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | n = int(input())
arr = list(map(int, input().split()))
while len(arr):
print(len(arr))
low = min(arr)
    arr = [x - low for x in arr if x != low]
| [
"[email protected]"
] | |
38bfd9428ec551d1e7378c0bed6889e115ea4cdb | a8499deff2fa4abde885891d655c3f53ab741d37 | /rotkehlchen/typing.py | baf11f828d8959e799887993f4df71e0658772a7 | [
"BSD-3-Clause"
] | permissive | georgerobescu/rotkehlchen | 4c6d2cadcf8b1a8a20f33bb7352b3924a492af54 | 817c880b771b8daf5635b02642861dd9949689e2 | refs/heads/master | 2020-06-25T00:22:20.442607 | 2019-07-26T11:42:13 | 2019-07-26T11:42:13 | 199,137,746 | 0 | 0 | BSD-3-Clause | 2019-07-27T08:25:06 | 2019-07-27T08:24:59 | Python | UTF-8 | Python | false | false | 3,836 | py | from enum import Enum
from typing import Dict, NamedTuple, NewType, Optional, Union
from rotkehlchen.fval import FVal
T_BinaryEthAddress = bytes
BinaryEthAddress = NewType('BinaryEthAddress', T_BinaryEthAddress)
T_Timestamp = int
Timestamp = NewType('Timestamp', T_Timestamp)
T_ApiKey = bytes
ApiKey = NewType('ApiKey', T_ApiKey)
T_ApiSecret = bytes
ApiSecret = NewType('ApiSecret', T_ApiSecret)
T_B64EncodedBytes = bytes
B64EncodedBytes = NewType('B64EncodedBytes', T_B64EncodedBytes)
T_B64EncodedString = str
B64EncodedString = NewType('B64EncodedString', T_B64EncodedString)
class ApiCredentials(NamedTuple):
"""Represents Credentials for various APIs. Exchanges, Premium e.t.c."""
api_key: ApiKey
api_secret: ApiSecret
@staticmethod
def serialize(api_key: str, api_secret: str) -> 'ApiCredentials':
return ApiCredentials(
api_key=ApiKey(str.encode(api_key)),
api_secret=ApiSecret(str.encode(api_secret)),
)
T_FilePath = str
FilePath = NewType('FilePath', T_FilePath)
T_TradePair = str
TradePair = NewType('TradePair', T_TradePair)
T_FiatAsset = str
FiatAsset = NewType('FiatAsset', T_FiatAsset)
T_EthAddres = str
EthAddress = NewType('EthAddress', T_EthAddres)
T_ChecksumEthAddress = str
ChecksumEthAddress = NewType('ChecksumEthAddress', T_ChecksumEthAddress)
T_BTCAddress = str
BTCAddress = NewType('BTCAddress', T_BTCAddress)
BlockchainAddress = Union[EthAddress, BTCAddress, ChecksumEthAddress]
class EthTokenInfo(NamedTuple):
address: ChecksumEthAddress
symbol: str
name: str
decimal: int
T_EmptyStr = str
EmptyStr = NewType('EmptyStr', T_EmptyStr)
T_Fee = FVal
Fee = NewType('Fee', T_Fee)
T_Price = FVal
Price = NewType('Price', T_Price)
class ResultCache(NamedTuple):
"""Represents a time-cached result of some API query"""
result: Dict
timestamp: Timestamp
T_EventType = str
EventType = NewType('EventType', T_EventType)
class EthereumTransaction(NamedTuple):
"""Represent an Ethereum transaction"""
timestamp: Timestamp
block_number: int
hash: bytes
from_address: EthAddress
to_address: EthAddress
value: FVal
gas: FVal
gas_price: FVal
gas_used: FVal
class SupportedBlockchain(Enum):
"""These are the blockchains for which account tracking is supported """
ETHEREUM = 'ETH'
BITCOIN = 'BTC'
class AssetType(Enum):
FIAT = 1
OWN_CHAIN = 2
ETH_TOKEN = 3
OMNI_TOKEN = 4
NEO_TOKEN = 5
XCP_TOKEN = 6
BTS_TOKEN = 7
ARDOR_TOKEN = 8
NXT_TOKEN = 9
UBIQ_TOKEN = 10
NUBITS_TOKEN = 11
BURST_TOKEN = 12
WAVES_TOKEN = 13
QTUM_TOKEN = 14
STELLAR_TOKEN = 15
TRON_TOKEN = 16
ONTOLOGY_TOKEN = 17
ETH_TOKEN_AND_MORE = 18
EXCHANGE_SPECIFIC = 19
VECHAIN_TOKEN = 20
BINANCE_TOKEN = 21
class AssetData(NamedTuple):
"""Data of an asset. Keep in sync with assets/asset.py"""
identifier: str
name: str
symbol: str
active: bool
asset_type: AssetType
# Every asset should have a started timestamp except for FIAT which are
# most of the times older than epoch
started: Optional[Timestamp]
ended: Optional[Timestamp]
forked: Optional[str]
swapped_for: Optional[str]
ethereum_address: Optional[ChecksumEthAddress]
decimals: Optional[int]
class TradeType(Enum):
BUY = 1
SELL = 2
SETTLEMENT_BUY = 3
SETTLEMENT_SELL = 4
def __str__(self) -> str:
if self == TradeType.BUY:
return 'buy'
elif self == TradeType.SELL:
return 'sell'
elif self == TradeType.SETTLEMENT_BUY:
return 'settlement_buy'
elif self == TradeType.SETTLEMENT_SELL:
return 'settlement_sell'
raise RuntimeError('Corrupt value for TradeType -- Should never happen')
| [
"[email protected]"
] | |
09be8dca5ceacb21fe25c7dffea510e79c928207 | 2379e840d0a9e47331ac247f4d6164cdfd548cbd | /tools/docs/docload.py | 6cdadb8d0f4de2a0db4459bf4ad0b6d8bf40ee44 | [
"BSD-3-Clause",
"PostgreSQL"
] | permissive | jberkus/pgweb | b9d04bc60da437f2f60c8e4cf844e0cfa601a160 | fa07ed84b8708240264fe9f091b2087b7f872b8c | refs/heads/master | 2020-04-03T10:12:04.734671 | 2017-05-11T13:00:07 | 2017-05-11T13:00:20 | 30,271,195 | 0 | 0 | null | 2015-02-03T23:49:04 | 2015-02-03T23:49:04 | null | UTF-8 | Python | false | false | 3,730 | py | #!/usr/bin/env python
# Script to load documentation from tarballs
import sys
import os
import tarfile
import re
import tidy
from optparse import OptionParser
from ConfigParser import ConfigParser
import psycopg2
pagecount = 0
quiet = False
re_titlematch = re.compile('<title\s*>([^<]+)</title\s*>', re.IGNORECASE)
## Load a single page
def load_doc_file(filename, f):
tidyopts = dict(drop_proprietary_attributes=1,
alt_text='',
hide_comments=1,
output_xhtml=1,
show_body_only=1,
clean=1,
char_encoding='utf8',
indent='auto',
)
# Postgres 10 started using xml toolchain and now produces docmentation in utf8. So we need
# to figure out which version it is.
rawcontents = f.read()
if rawcontents.startswith('<?xml version="1.0" encoding="UTF-8"'):
# Version 10, use utf8
encoding = 'utf-8'
# XML builds also don't need clean=1, and that one adds some interesting CSS properties
del tidyopts['clean']
else:
encoding = 'latin1'
contents = unicode(rawcontents, encoding)
tm = re_titlematch.search(contents)
if tm:
title = tm.group(1)
else:
title = ""
if not quiet: print "--- file: %s (%s) ---" % (filename, title)
s = tidy.parseString(contents.encode('utf-8'), **tidyopts)
curs.execute("INSERT INTO docs (file, version, title, content) VALUES (%(f)s, %(v)s, %(t)s, %(c)s)",{
'f': filename,
'v': ver,
't': title,
'c': str(s),
})
global pagecount
pagecount += 1
## Main execution
parser = OptionParser(usage="usage: %prog [options] <version> <tarfile>")
parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
help="Run quietly")
(options, args) = parser.parse_args()
if len(args) != 2:
parser.print_usage()
sys.exit(1)
quiet = options.quiet
ver = sys.argv[1]
tarfilename = sys.argv[2]
config = ConfigParser()
config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'docload.ini'))
if not os.path.isfile(tarfilename):
print "File %s not found" % tarfilename
sys.exit(1)
tf = tarfile.open(tarfilename)
connection = psycopg2.connect(config.get('db', 'dsn'))
curs = connection.cursor()
# Verify that the version exists, and what we're loading
curs.execute("SELECT current FROM core_version WHERE tree=%(v)s", {'v': ver})
r = curs.fetchall()
if len(r) != 1:
print "Version %s not found in the system, cannot load!" % ver
sys.exit(1)
iscurrent = r[0][0]
# Remove any old docs for this version (still protected by a transaction while
# we perform the load)
curs.execute("DELETE FROM docs WHERE version=%(v)s", {'v': ver})
re_htmlfile = re.compile('[^/]*/doc/src/sgml/html/.*')
re_tarfile = re.compile('[^/]*/doc/postgres.tar.gz$')
for member in tf:
if re_htmlfile.match(member.name):
load_doc_file(os.path.basename(member.name), tf.extractfile(member))
if re_tarfile.match(member.name):
f = tf.extractfile(member)
inner_tar = tarfile.open(fileobj=f)
for inner_member in inner_tar:
# Some old versions have index.html as a symlink - so let's
# just ignore all symlinks to be on the safe side.
if inner_member.issym(): continue
if inner_member.name.endswith('.html') or inner_member.name.endswith('.htm'):
load_doc_file(inner_member.name, inner_tar.extractfile(inner_member))
tf.close()
# Update the docs loaded timestamp
curs.execute("UPDATE core_version SET docsloaded=CURRENT_TIMESTAMP WHERE tree=%(v)s", {'v': ver})
# Issue varnish purge for all docs of this version
if ver == "0":
# Special handling of developer docs...
ver = "devel"
curs.execute("SELECT varnish_purge('^/docs/' || %(v)s || '/')", {'v': ver})
if iscurrent:
curs.execute("SELECT varnish_purge('^/docs/current/')")
connection.commit()
connection.close()
if not quiet: print "Done (%i pages)." % pagecount
| [
"[email protected]"
] | |
76dc68fb3fe760087f98d3966455e9f50c8e0e40 | c5d6e21744f10c6e57d58b57bba2763b82a9726b | /Bimestre_03_Aula_03/exercicios/04_somando_forcas.py | ccae92fe26f595c8832a0e1be2f97b2d84922e72 | [] | no_license | valeriacavalcanti/ALP-2020-R | bf32af707d49db650deb6d122a1abdf58d94ae4f | 62e0be861ad7439b99ae5d0b0e14d97c887424c7 | refs/heads/main | 2023-05-05T02:05:00.128872 | 2021-06-04T10:30:05 | 2021-06-04T10:30:05 | 316,784,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | # declarar as matrizes
matriz1 = []
for i in range(6):
matriz1.append([0] * 4)
matriz2 = []
for i in range(6):
matriz2.append([0] * 4)
matriz_soma = []
for i in range(6):
matriz_soma.append([0] * 4)
# read the matrix data
for i in range(6):
for j in range(4):
matriz1[i][j] = int(input("Matriz1 {} {}: ".format(i, j)))
for i in range(6):
for j in range(4):
matriz2[i][j] = int(input("Matriz2 {} {}: ".format(i, j)))
# compute the sum of matrices 1 and 2
for i in range(6):
for j in range(4):
matriz_soma[i][j] = matriz1[i][j] + matriz2[i][j]
print(matriz_soma)
| [
"[email protected]"
] | |
7b5cbe0847dd7ee161252a3f29692ab99f07de36 | d7ccb4225f623139995a7039f0981e89bf6365a4 | /.history/carts/models_20211010234636.py | 41796b5b857bad7162f4bffa6392f2ca23d6f216 | [] | no_license | tonnymuchui/django-mall | 64fd4abc3725c1bd0a3dcf20b93b490fe9307b37 | 55c083d8433be3c77adc61939cd197902de4ce76 | refs/heads/master | 2023-08-23T04:59:20.418732 | 2021-10-13T15:59:37 | 2021-10-13T15:59:37 | 415,668,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from django.db import models
# Create your models here.
class Cart(models.Model):
cart_id = models.CharField(max_length=255, blank=True)
date_added = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.cart_id
class CardItem(models.Model):
    # Completed from the truncated history snapshot: the referenced Product model is
    # assumed to be defined elsewhere in the project (its import is not shown here),
    # so a lazy string reference keeps this file importable.
    product = models.ForeignKey('Product', on_delete=models.CASCADE)
"[email protected]"
] | |
ff2921be3e53bb57a802f21445f685d8aa6bbabf | bc441bb06b8948288f110af63feda4e798f30225 | /notify_sdk/model/easy_flow/package_info_pb2.py | be3f54867be88fa1063e26c8f7b235927c5b2589 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 13,382 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: package_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from notify_sdk.model.cmdb import cluster_info_pb2 as notify__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2
from notify_sdk.model.easy_flow import version_info_pb2 as notify__sdk_dot_model_dot_easy__flow_dot_version__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='package_info.proto',
package='easy_flow',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flow'),
serialized_pb=_b('\n\x12package_info.proto\x12\teasy_flow\x1a(notify_sdk/model/cmdb/cluster_info.proto\x1a-notify_sdk/model/easy_flow/version_info.proto\"\xc2\x04\n\x0bPackageInfo\x12\x13\n\x0bpackageName\x18\x01 \x01(\t\x12\x13\n\x0bversionName\x18\x02 \x01(\t\x12\"\n\x07\x63luster\x18\x03 \x01(\x0b\x32\x11.cmdb.ClusterInfo\x12\x14\n\x0cpreVersionId\x18\x04 \x01(\t\x12\x16\n\x0epreVersionName\x18\x05 \x01(\t\x12\x19\n\x11preVersionEnvType\x18\x06 \x01(\x05\x12\x15\n\rtargetVersion\x18\x07 \x01(\t\x12\x0f\n\x07preStop\x18\x08 \x01(\x08\x12\x11\n\tpostStart\x18\t \x01(\x08\x12\x13\n\x0bpostRestart\x18\n \x01(\x08\x12\x11\n\tautoStart\x18\x0b \x01(\x08\x12\x11\n\tuserCheck\x18\x0c \x01(\x08\x12\x12\n\nfullUpdate\x18\r \x01(\x08\x12\x13\n\x0b\x66orceUpdate\x18\x0e \x01(\x08\x12\r\n\x05\x66orce\x18\x0f \x01(\x08\x12\x14\n\x0c\x66orceInstall\x18\x10 \x01(\x08\x12\x17\n\x0fsimulateInstall\x18\x11 \x01(\x08\x12+\n\x0bversionInfo\x18\x12 \x01(\x0b\x32\x16.easy_flow.VersionInfo\x12\x11\n\tspecialOp\x18\x13 \x01(\t\x12\x10\n\x08\x66ileOnly\x18\x14 \x01(\x08\x12\x12\n\nscriptOnly\x18\x15 \x01(\x08\x12\x11\n\tversionId\x18\x16 \x01(\t\x12\x11\n\tpackageId\x18\x17 \x01(\t\x12\x13\n\x0binstallPath\x18\x18 \x01(\t\x12\x0c\n\x04type\x18\x19 \x01(\x05\x12\x10\n\x08platform\x18\x1a \x01(\tBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flowb\x06proto3')
,
dependencies=[notify__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2.DESCRIPTOR,notify__sdk_dot_model_dot_easy__flow_dot_version__info__pb2.DESCRIPTOR,])
_PACKAGEINFO = _descriptor.Descriptor(
name='PackageInfo',
full_name='easy_flow.PackageInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='packageName', full_name='easy_flow.PackageInfo.packageName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionName', full_name='easy_flow.PackageInfo.versionName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='easy_flow.PackageInfo.cluster', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preVersionId', full_name='easy_flow.PackageInfo.preVersionId', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preVersionName', full_name='easy_flow.PackageInfo.preVersionName', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preVersionEnvType', full_name='easy_flow.PackageInfo.preVersionEnvType', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetVersion', full_name='easy_flow.PackageInfo.targetVersion', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preStop', full_name='easy_flow.PackageInfo.preStop', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='postStart', full_name='easy_flow.PackageInfo.postStart', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='postRestart', full_name='easy_flow.PackageInfo.postRestart', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='autoStart', full_name='easy_flow.PackageInfo.autoStart', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='userCheck', full_name='easy_flow.PackageInfo.userCheck', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fullUpdate', full_name='easy_flow.PackageInfo.fullUpdate', index=12,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='forceUpdate', full_name='easy_flow.PackageInfo.forceUpdate', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force', full_name='easy_flow.PackageInfo.force', index=14,
number=15, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='forceInstall', full_name='easy_flow.PackageInfo.forceInstall', index=15,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='simulateInstall', full_name='easy_flow.PackageInfo.simulateInstall', index=16,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionInfo', full_name='easy_flow.PackageInfo.versionInfo', index=17,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='specialOp', full_name='easy_flow.PackageInfo.specialOp', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fileOnly', full_name='easy_flow.PackageInfo.fileOnly', index=19,
number=20, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scriptOnly', full_name='easy_flow.PackageInfo.scriptOnly', index=20,
number=21, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionId', full_name='easy_flow.PackageInfo.versionId', index=21,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='easy_flow.PackageInfo.packageId', index=22,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='installPath', full_name='easy_flow.PackageInfo.installPath', index=23,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='easy_flow.PackageInfo.type', index=24,
number=25, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='platform', full_name='easy_flow.PackageInfo.platform', index=25,
number=26, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=701,
)
_PACKAGEINFO.fields_by_name['cluster'].message_type = notify__sdk_dot_model_dot_cmdb_dot_cluster__info__pb2._CLUSTERINFO
_PACKAGEINFO.fields_by_name['versionInfo'].message_type = notify__sdk_dot_model_dot_easy__flow_dot_version__info__pb2._VERSIONINFO
DESCRIPTOR.message_types_by_name['PackageInfo'] = _PACKAGEINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PackageInfo = _reflection.GeneratedProtocolMessageType('PackageInfo', (_message.Message,), {
'DESCRIPTOR' : _PACKAGEINFO,
'__module__' : 'package_info_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.PackageInfo)
})
_sym_db.RegisterMessage(PackageInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
9f29c6cd97e8f4ec92d968578340ad5c858fb023 | be4459658d667c47eefeeb3cf689a678042edb94 | /modules/ext/util/lua/packages/luaunit/luaunit/doit.py | b4e636abe621a2734eb4e4f3ce9b7ce2d3bef357 | [
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kparr/RIFT.ware-1 | 7945174aa23ac1f7d74a7464b645db5824982fc3 | 6846108d70b80b95c5117fdccd44ff058ac605be | refs/heads/master | 2021-01-13T08:36:03.751610 | 2016-07-24T21:36:15 | 2016-07-24T21:36:15 | 72,420,438 | 0 | 0 | null | 2016-10-31T09:11:27 | 2016-10-31T09:11:27 | null | UTF-8 | Python | false | false | 2,796 | py | import subprocess, sys, os, shutil, os.path, optparse
VERSION='3.0'
RELEASE_NAME='luaunit-%s' % VERSION
RELEASE_DIR='release/' + RELEASE_NAME + '/'
TARGET_ZIP=RELEASE_NAME + '.zip'
TARGET_TGZ=RELEASE_NAME + '.tgz'
REPO_PATH='d:/work/luaunit/luaunit-git/luaunit/'
LUA50='d:/program/lua/lua50/lua50.exe'
LUA51='d:/program/lua/lua51/lua51.exe'
LUA52='d:/program/lua/lua52/lua52.exe'
ALL_LUA = (
(LUA52, 'lua 5.2'),
(LUA51, 'lua 5.1'),
# (LUA50, 'lua 5.0'),
)
os.environ["nodosfilewarning"] = "1"
def report( s ):
print '[[[[[[[[[[[[[ %s ]]]]]]]]]]]]]' % s
def run_tests():
'''Run tests with all versions of lua'''
for lua, luaversion in ALL_LUA:
report( 'Running tests with %s' % luaversion )
retcode = subprocess.call( [lua, 'test_luaunit.lua'] )
if retcode != 0:
report( 'Invalid retcode when running tests: %d' % retcode )
sys.exit( retcode )
report( 'All tests succeed!' )
def run_example():
for lua, luaversion in ALL_LUA:
report( 'Running examples with %s' % luaversion )
retcode = subprocess.call( [lua, 'example_with_luaunit.lua'] )
if retcode != 12:
report( 'Invalid retcode when running examples: %d' % retcode )
sys.exit( retcode )
report( 'All examples ran!' )
def packageit():
shutil.rmtree('release', True)
try:
os.mkdir('release')
except OSError:
pass
subprocess.check_call(['d:/program/msysgit/msysgit/bin/git.exe', 'clone', '--no-hardlinks', REPO_PATH, RELEASE_DIR])
os.chdir( RELEASE_DIR )
# Release dir cleanup
shutil.rmtree('.git')
os.unlink('.gitignore')
run_tests()
run_example()
makedoc()
shutil.rmtree('doc/_build')
# Packaging
os.chdir('..')
report('Start packaging')
shutil.make_archive(RELEASE_NAME, 'zip', root_dir='.', base_dir=RELEASE_NAME )
shutil.make_archive(RELEASE_NAME, 'gztar', root_dir='.', base_dir=RELEASE_NAME )
report('Zip and tgz ready!')
def help():
print( 'Available actions:')
for opt in OptToFunc:
print '\t%s' % opt
def makedoc():
os.chdir('doc')
if os.path.exists('html'):
shutil.rmtree('html')
subprocess.check_call(['make.bat', 'html'])
shutil.copytree('_build/html', 'html')
os.chdir('..')
OptToFunc = {
'runtests' : run_tests,
'runexample' : run_example,
'packageit' : packageit,
'makedoc' : makedoc,
'help' : help,
}
if __name__ == '__main__':
doingNothing = True
for arg in sys.argv[1:]:
if OptToFunc.has_key(arg):
doingNothing = False
OptToFunc[arg]()
else:
print 'No such action :', arg
sys.exit(-1)
if doingNothing:
help()
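# Typical invocation (illustrative): "python doit.py runtests packageit" runs the
# test suite against every configured Lua interpreter and then builds the release
# zip/tgz archives; an unknown action name prints a message and exits with -1.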
| [
"[email protected]"
] | |
6bc2f2f74fa4f7f954d72f9f188aa154ec803c32 | 4f7aa44d21ae38093869e79e10f5cdc8842d48b7 | /01-python-academy-intro-lab/exceptions_01.py | 743b25bface618a473cf911dd91b8165a4e2d8b3 | [
"Apache-2.0"
] | permissive | iproduct/intro-python | 31e802c2c21a4df3361656f12d267ec52c2d6564 | 7e08e144da2907fcf45dc734ab4e896631625d75 | refs/heads/master | 2023-02-19T11:42:37.522624 | 2023-02-13T15:54:03 | 2023-02-13T15:54:03 | 128,980,155 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | import sys
class MyException(Exception):
def __init__(self, *args):
# Exception.__init__(self, *args)
super().__init__(*args)
def erroneuos():
try:
return 1 / 0
except:
print("Within except.")
raise
finally:
print("Within finally.")
if __name__ == "__main__":
try:
erroneuos()
except Exception as ex:
# tb = sys.exc_info()[2]
# print(f"Type: {sys.exc_info()[0]}, Value: {sys.exc_info()[1]}\n")
raise MyException("Raised from main()") from ex
| [
"[email protected]"
] | |
6c6ca35bcb1cae919f48bc7f70104caabf953925 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/2329.py | bf535ce3f6a4ddf821921237facfb76044f407d5 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | ipt = open('B-large.in').read().splitlines()
cases = int(ipt[0])
fw = open('B-large.out','w')
for i in xrange(cases):
c_per_sec = 2.0
c,f,x = map(lambda x: float(x), ipt[i+1].split(' '))
just_wait = x / c_per_sec
sub_total = 0
while True:
#print just_wait
sub_total += c/c_per_sec
c_per_sec += f
total = sub_total + (x / c_per_sec)
if total >= just_wait:
fw.write("Case #{}: {}\n".format(i+1, round(just_wait, 7)))
break
else:
just_wait = total
fw.close() | [
"[email protected]"
] | |
2ece9826a7c51f922007dd4b37ea17ae54963ccc | c609730a43596a2d3303f072fc97d9cf681fac7b | /cagey/kache/kache/spiders/woniuhc.py | 954c5f0dbca656d7d140c892215686432f16e2ad | [] | no_license | sinnettluo/ChenProject | 5403311c0c7b78c484145e16d692abff00d2a110 | 0e33ecf1683afb22f1deb4bd54294c41aed8a46b | refs/heads/master | 2023-03-22T23:48:08.430178 | 2020-09-02T15:05:02 | 2020-09-02T15:05:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,182 | py | # -*- coding: utf-8 -*-
import scrapy
import time
import json
from copy import deepcopy
from kache.items import woniuItem
class WoniuhcSpider(scrapy.Spider):
name = 'woniuhc'
allowed_domains = ['woniuhuoche.com']
# start_urls = ['http://woniuhuoche.com/']
@classmethod
def update_settings(cls, settings):
settings.setdict(getattr(cls, 'custom_debug_settings' if getattr(cls, 'is_debug', False) else 'custom_settings', None) or {}, priority='spider')
def __init__(self, **kwargs):
super(WoniuhcSpider, self).__init__(**kwargs)
self.counts = 0
is_debug = True
custom_debug_settings = {
'MYSQL_SERVER': '192.168.1.94',
'MYSQL_DB': 'truck',
'MYSQL_TABLE': 'woniuhc',
'MONGODB_SERVER': '192.168.1.94',
'MONGODB_DB': 'truck',
'MONGODB_COLLECTION': 'woniuhc',
'CONCURRENT_REQUESTS': 8,
'DOWNLOAD_DELAY': 0,
'LOG_LEVEL': 'DEBUG',
}
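    # Note: update_settings() above injects custom_debug_settings (local MySQL/MongoDB
    # targets, low concurrency, DEBUG logging) when is_debug is True, so the spider can
    # switch between debug and normal project settings without further code changes.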
def start_requests(self):
url = "http://www.woniuhuoche.com/truck-auction-app/api/auction/v1/lotList?source=2&type=0"
yield scrapy.Request(
url=url,
dont_filter=True,
)
def parse(self, response):
item = woniuItem()
res = json.loads(response.text)
data_list = res["data"]["lotList"]
for data in data_list:
item["title"] = data["title"]
item["carid"] = data["lotId"]
item["truckId"] = data["truckId"]
item["registerdate"] = data["registDate"]
item["price"] = data["maxPrice"]
item["emission"] = data["emission"]
item["startTime"] = data["startTime"]
item["endTime"] = data["endTime"]
url = f"http://www.woniuhuoche.com/truck-auction-app/api/auction/v1/truckDetail?lotId={item['carid']}&truckId={item['truckId']}"
item["url"] = url
yield scrapy.Request(
url=url,
callback=self.parse_detail_url,
meta={"item": deepcopy(item)},
dont_filter=True,
)
def parse_detail_url(self, response):
item = response.meta["item"]
res = json.loads(response.text)
data = res["data"]
item["city"] = data["city"]
basicList = data["basicList"]
for basic in basicList:
if "数量" in basic["key"]:
item["num"] = basic["value"]
if "车辆类型" in basic["key"]:
item["car_type"] = basic["value"]
if "表显里程" in basic["key"]:
item["mileage"] = basic["value"]
if "发动机品牌" in basic["key"]:
item["engine"] = basic["value"]
if "燃料类型" in basic["key"]:
item["fuel"] = basic["value"]
if "排放标准" in basic["key"]:
item["let"] = basic["value"]
if "品牌" in basic["key"]:
item["brand"] = basic["value"]
if "车辆颜色" in basic["key"]:
item["color"] = basic["value"]
if "最大马力" in basic["key"]:
item["hoursepower"] = basic["value"]
if "箱体长度" in basic["key"]:
item["containerLong"] = basic["value"]
if "栏板高度" in basic["key"]:
item["containerHight"] = basic["value"]
if "驱动形式" in basic["key"]:
item["driveType"] = basic["value"]
if "罐体容积" in basic["key"]:
item["containerVolume"] = basic["value"]
if "看车地点" in basic["key"]:
item["carLocation"] = basic["value"]
proceduresList = data["proceduresList"]
for procedures in proceduresList:
if "是否可过户" in procedures["key"]:
item["isTransfer"] = procedures["value"]
if "年检到期日" in procedures["key"]:
item["inspectionDate1"] = procedures["value"]
if "购置税证" in procedures["key"]:
item["isPurchase"] = procedures["value"]
if "交强险" in procedures["key"]:
item["inspectionDate2"] = procedures["value"]
if "第三责任险" in procedures["key"]:
item["inspectionDate3"] = procedures["value"]
if "其他法定凭证、证书" in procedures["key"]:
item["isCertificate"] = procedures["value"]
if "是否有违章" in procedures["key"]:
item["isRules"] = procedures["value"]
if "是否抵押车" in procedures["key"]:
item["isMortgage"] = procedures["value"]
detect = data["detect"]
item["grade"] = detect["grade"]
item["surveyor"] = detect["surveyor"]
item["detectTime"] = detect["detectTime"]
item["detectItem"] = json.dumps(detect["detectItem"], ensure_ascii=False)
item["desc"] = data["descp"]
item["statusplus"] = item["url"]+'-'+item["price"]
item["grab_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
yield item
# print(item)
| [
"[email protected]"
] | |
b3305b39cd894d4e5bb295a10bedfc5a27e0d140 | e358b0c801b0173e2b5fe31820af2a45d5fff8ae | /altair_saver/savers/tests/test_html.py | 6899fec8d9fd514217be3d235d24a1e5c2dde027 | [
"BSD-3-Clause"
] | permissive | steevensmelo/altair_saver | a3270d2a4c2615c0b86b6d10f87e67f04c3bd40f | d8dfc9a8d40b3ad20d152e014b37e3b1e6d512ef | refs/heads/master | 2022-06-11T11:53:12.329070 | 2020-05-13T14:50:11 | 2020-05-13T14:50:11 | 263,651,277 | 0 | 0 | BSD-3-Clause | 2020-05-13T14:19:45 | 2020-05-13T14:19:44 | null | UTF-8 | Python | false | false | 5,557 | py | import io
import json
import os
from typing import Any, Dict, IO, Iterator, Optional, Tuple
from altair_data_server import Provider
from PIL import Image
import pytest
import selenium.webdriver
from selenium.webdriver.remote.webdriver import WebDriver
from altair_saver import HTMLSaver
from altair_saver._utils import internet_connected
CDN_URL = "https://cdn.jsdelivr.net"
@pytest.fixture(scope="module")
def internet_ok() -> bool:
return internet_connected()
@pytest.fixture(scope="module")
def provider() -> Iterator[Provider]:
provider = Provider()
yield provider
provider.stop()
@pytest.fixture(scope="module")
def driver() -> Iterator[WebDriver]:
options = selenium.webdriver.chrome.options.Options()
options.add_argument("--headless")
if hasattr(os, "geteuid") and (os.geteuid() == 0):
options.add_argument("--no-sandbox")
driver = selenium.webdriver.Chrome(options=options)
yield driver
driver.quit()
def get_testcases() -> Iterator[Tuple[str, Dict[str, Any]]]:
directory = os.path.join(os.path.dirname(__file__), "testcases")
cases = set(f.split(".")[0] for f in os.listdir(directory))
f: IO
for case in sorted(cases):
with open(os.path.join(directory, f"{case}.vl.json")) as f:
vl = json.load(f)
with open(os.path.join(directory, f"{case}.png"), "rb") as f:
png = f.read()
yield case, {"vega-lite": vl, "png": png}
@pytest.mark.parametrize("inline", [True, False])
@pytest.mark.parametrize("embed_options", [None, {"theme": "dark"}])
@pytest.mark.parametrize("case, data", get_testcases())
def test_html_save(
case: str, data: Dict[str, Any], embed_options: Optional[dict], inline: bool
) -> None:
saver = HTMLSaver(data["vega-lite"], inline=inline, embed_options=embed_options)
fp = io.StringIO()
saver.save(fp, "html")
html = fp.getvalue()
assert isinstance(html, str)
assert html.strip().startswith("<!DOCTYPE html>")
assert json.dumps(data["vega-lite"]) in html
assert f"const embedOpt = {json.dumps(embed_options or {})}" in html
if inline:
assert CDN_URL not in html
else:
assert CDN_URL in html
@pytest.mark.parametrize("embed_options", [None, {"theme": "dark"}])
@pytest.mark.parametrize("case, data", get_testcases())
def test_html_mimebundle(
case: str, data: Dict[str, Any], embed_options: Optional[dict],
) -> None:
saver = HTMLSaver(data["vega-lite"], embed_options=embed_options)
bundle = saver.mimebundle("html")
assert bundle.keys() == {"text/html"}
html = bundle["text/html"]
assert isinstance(html, str)
assert html.strip().startswith("<div")
assert json.dumps(data["vega-lite"]) in html
assert json.dumps(embed_options or {}) in html
assert CDN_URL in html
def test_bad_format() -> None:
saver = HTMLSaver({})
with pytest.raises(ValueError):
saver.mimebundle("vega")
@pytest.mark.parametrize("case, data", get_testcases())
@pytest.mark.parametrize("inline", [True, False])
def test_html_save_rendering(
provider: Provider,
driver: WebDriver,
case: str,
data: Dict[str, Any],
inline: bool,
internet_ok: bool,
) -> None:
if not (inline or internet_ok):
pytest.xfail("Internet connection not available")
saver = HTMLSaver(data["vega-lite"], inline=inline)
fp = io.StringIO()
saver.save(fp, "html")
html = fp.getvalue()
resource = provider.create(content=html, extension="html")
driver.set_window_size(800, 600)
driver.get(resource.url)
element = driver.find_element_by_class_name("vega-visualization")
png = driver.get_screenshot_as_png()
im = Image.open(io.BytesIO(png))
left = element.location["x"]
top = element.location["y"]
right = element.location["x"] + element.size["width"]
bottom = element.location["y"] + element.size["height"]
im = im.crop((left, top, right, bottom))
im_expected = Image.open(io.BytesIO(data["png"]))
assert abs(im.size[0] - im_expected.size[0]) < 40
assert abs(im.size[1] - im_expected.size[1]) < 40
@pytest.mark.parametrize("requirejs", [True, False])
@pytest.mark.parametrize("case, data", get_testcases())
def test_html_mimebundle_rendering(
provider: Provider,
driver: WebDriver,
case: str,
data: Dict[str, Any],
requirejs: bool,
internet_ok: bool,
) -> None:
if not internet_ok:
pytest.xfail("Internet connection not available")
saver = HTMLSaver(data["vega-lite"])
bundle = saver.mimebundle("html")
html = bundle["text/html"]
assert isinstance(html, str)
if requirejs:
html = f"""<!DOCTYPE html>
<html>
<head><script src="{CDN_URL}/npm/[email protected]"></script></head>
<body>{html}</body>
</html>
"""
else:
html = f"<html>{html}</html>"
resource = provider.create(content=html, extension="html")
driver.set_window_size(800, 600)
driver.get(resource.url)
element = driver.find_element_by_class_name("vega-visualization")
png = driver.get_screenshot_as_png()
im = Image.open(io.BytesIO(png))
left = element.location["x"]
top = element.location["y"]
right = element.location["x"] + element.size["width"]
bottom = element.location["y"] + element.size["height"]
im = im.crop((left, top, right, bottom))
im_expected = Image.open(io.BytesIO(data["png"]))
assert abs(im.size[0] - im_expected.size[0]) < 40
assert abs(im.size[1] - im_expected.size[1]) < 40
| [
"[email protected]"
] | |
d1ec62a1a39029406e4931b1030ca1fc287e0ef6 | 8985ee04b6d9af28c324e1567f73b114853c3f24 | /src/ts_models/discrete_sparse/tsb.py | 077e943ef31160d8dae4d5fdca012be68999e6f5 | [] | no_license | longgb246/MLlearn | 97f242dda7a43b2b1fee8458c24f121dcbb05e08 | db2d0b05020a1fcb9f0cfaf9386f79daeaad759e | refs/heads/master | 2020-03-29T14:02:43.778113 | 2019-04-04T02:53:59 | 2019-04-04T02:53:59 | 149,995,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,492 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/19 9:11
# @Author : zhengdepeng
# @concat : [email protected]
# @File : crost.py.py
# @Software: PyCharm
"""
description: function to forecast intermittent demand or slow-moving item series (TSB variant of Croston's method)
Reference :R version package,http://kourentzes.com/forecasting/2014/06/23/intermittent-demand-forecasting-package-for-r/
"""
import numpy as np
import warnings
from scipy.optimize import minimize
import functools
def tsb(data, horizon=10, smoothing_para=None, init_method=("mean", "mean"), num_para=1, cost="mse",
init_opt=True, opt_on=True):
"""
:param data: Intermittent demand time series. ->single time series
:param horizon: Forecast horizon.
:param smoothing_para: Smoothing parameters. If w == NULL then parameters are optimised.If w is a single
parameter then the same is used for smoothing both the demand and the intervals. If two
parameters are provided then the second is used to smooth the intervals.
:param init_method: Initial values for demand and intervals. This is:
c(z,x) - Vector of two scalars, where first is initial demand and
second is initial interval;
"naive" - Initial demand is first non-zero demand and initial interval
is first interval;
"mean" - Same as "naive", but initial interval is the mean of all
in sample intervals.
:param num_para: Specifies the number of model parameters. Used only if they are optimised.
1 - Demand and interval parameters are the same
2 - Different demand and interval parameters
:param cost: Cost function used for optimisation
"mar" - Mean absolute rate
"msr" - Mean squared rate
"mae" - Mean absolute error
"mse" - Mean squared error
:param init_opt: If init.opt==TRUE then initial values are optimised.
:param opt_on: This is meant to use only by the optimisation function. When opt.on is
# TRUE then no checks on inputs are performed.
:return:
model Type of model fitted.
frc_in In-sample demand rate.
frc_out Out-of-sample demand rate.
weights Smoothing parameters for demand and interval.
initial Initialisation values for demand and interval smoothing.
component List of c.in and c.out containing the non-zero demand and interval vectors for
in- and out-of-sample respectively. Third element is the coefficient used to scale
demand rate for sba and sbj.
"""
# trans data model_type to np.ndarray(1)
data_type = type(data)
if isinstance(data, np.ndarray):
data = data.ravel().copy()
else:
if isinstance(data, list):
data = np.array(data).ravel().copy()
else:
raise ValueError("data can be either np.ndarray or list. {0} is not allowed".format(data_type))
# make sure that num_para is of correct length
if (num_para > 2 or num_para < 1):
num_para = 2
warnings.warn("num_para can be either 1 or 2. Overwritten to 2")
n = data.shape[0]
# check number of non-zero values -- need to have as least two
# if ((data!=0).sum())<2:
# raise ValueError("need as least two non-zero valus to model time series")
# TSB decomposition
# print(data)
p = (data != 0).astype(int)
z = data[data != 0]
# initialize
if init_method == ("mean", "mean"):
init = [z[0], p.mean()]
else:
init = [z[0], p[0]]
# optimize parameters if requested
if opt_on == False:
if (init_opt == False and smoothing_para is None):
# print("###################optimizing##################")
opt_para = crost_opt(data, cost, smoothing_para, num_para, init, init_opt)
smoothing_para = opt_para[0]
# init = opt_para[1]
zfit = np.zeros(n)
pfit = np.zeros(n)
    if len(smoothing_para) == 1:
smoothing_demand = smoothing_para[0]
smoothing_prob = smoothing_para[0]
else:
smoothing_demand = smoothing_para[0]
smoothing_prob = smoothing_para[1]
# assign initial value and parameters
zfit[0] = init[0]
pfit[0] = init[1]
# fit model
# print(nzd,x,xfit)
for day in range(1, n):
pfit[day] = pfit[day - 1] + smoothing_prob * (p[day] - pfit[day - 1])
if (p[day] == 0):
zfit[day] = zfit[day - 1]
else:
zfit[day] = zfit[day - 1] + smoothing_demand * (data[day] - zfit[day - 1])
yfit = pfit * zfit
# calculate in-sample demand
frc_in = np.append([0], yfit[:-1])
# forcast out-of-sample demand
frc_out = np.tile(frc_in[-1], horizon)
# print("frc_in",frc_in)
return [frc_in, frc_out, zfit, pfit, smoothing_demand, smoothing_prob]
def crost_opt(data, cost, smoothing_para, num_para, init, init_opt):
"""
:param data: Intermittent demand time series. ->single time series
:param model_type: Croston's method variant:
1 - "croston" Croston's method;
2 - "sba" Syntetos-Boylan approximation;
3 - "sbj" Shale-Boylan-Johnston.
:param cost: Cost function used for optimisation
"mar" - Mean absolute rate
"msr" - Mean squared rate
"mae" - Mean absolute error
"mse" - Mean squared error
:param smoothing_para: Smoothing parameters. If w == NULL then parameters are optimised.If w is a single
parameter then the same is used for smoothing both the demand and the intervals. If two
parameters are provided then the second is used to smooth the intervals.
:param num_para: Specifies the number of model parameters. Used only if they are optimised.
1 - Demand and interval parameters are the same
2 - Different demand and interval parameters
:param init: initialized estimator point
:param init_opt: If init.opt==TRUE then initial values are optimised.
:return:
optimized_parameter: list[smoothing_para,optimized_init]
"""
if (smoothing_para is None and init_opt == False):
starting_para = [0.05] * num_para
bounds = [(0, 1)] * num_para
lbound = [0] * num_para
ubound = [1] * num_para
optimized_para = minimize(
fun=functools.partial(crost_cost, data=data, smoothing_opt=True, cost=cost, num_para=num_para, init=init,
init_opt=init_opt), x0=starting_para, method="Nelder-Mead", bounds=bounds)['x']
else:
raise ValueError("only smoothing_para optimization is supported,you have to set init_opt=False")
return [optimized_para]
def crost_cost(model_para, smoothing_opt, data, cost, num_para, init, init_opt):
"""
calculate total cost with given loss function and data
:param model_para: the parameter to optimize
:param data: Intermittent demand time series. ->single time series
:smoothing_para: Smoothing parameters. If w == NULL then parameters are optimised.If w is a single
parameter then the same is used for smoothing both the demand and the intervals. If two
parameters are provided then the second is used to smooth the intervals.
:param cost: Cost function used for optimisation
"mar" - Mean absolute rate
"msr" - Mean squared rate
"mae" - Mean absolute error
"mse" - Mean squared error
:param model_type: Croston's method variant:
1 - "croston" Croston's method;
2 - "sba" Syntetos-Boylan approximation;
3 - "sbj" Shale-Boylan-Johnston.
:param num_para: Specifies the number of model parameters. Used only if they are optimised.
1 - Demand and interval parameters are the same
2 - Different demand and interval parameters
:param opt_on: This is meant to use only by the optimisation function. When opt.on is
TRUE then no checks on inputs are performed.
:param init_method: Initial values for demand and intervals. This is:
c(z,x) - Vector of two scalars, where first is initial demand and
second is initial interval;
"naive" - Initial demand is first non-zero demand and initial interval
is first interval;
"mean" - Same as "naive", but initial interval is the mean of all
in sample intervals.
:param init_opt: If init.opt==TRUE then initial values are optimised.
:param lbound: lower bound for optimized parameter
:param ubound: upper bound for optimized parameter
:return:
scalar total cost
"""
if (smoothing_opt == True and init_opt == False):
frc_in = tsb(data=data, horizon=0, smoothing_para=model_para, init_method=init, opt_on=True)[0]
# print(frc_in)
# print(data)
else:
raise ValueError("only smoothing_para optimization is supported,you have to set init_opt=False")
if cost == 'mse':
E = data - frc_in
E = (E ** 2).mean()
if cost == 'mae':
E = data - frc_in
E = np.abs(E).mean()
if cost == 'mapd':
E = np.abs(data.sum() - frc_in.sum()) / data.sum()
if cost not in ['mse', 'mae', 'mapd']:
raise ValueError("Cost '{cost}' is not supported til now".format(cost=cost))
# print(E)
return E
# def test_cost(para,method):
# data=np.array([1,2])
# data2 = data * para
# data3 = data * para
# if method==1:
# residual = (data - data2 -3)**2
# else:
# residual = data ** 2 - data2 - data3 * 2
# return residual.sum()
#
# test_cost2 = functools.partial(test_cost, method=1)
#
# def func1(para):
# return (para-1)**2 - 3*para
#
#
# minimize(test_cost2,x0=(1),method="Nelder-Mead",tol=1e-2,options={'disp':True})
if __name__ == '__main__':
data = np.array(
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
results = tsb(data=data, horizon=7, smoothing_para=None, init_method='naive', num_para=2, cost='mapd',
init_opt=False, opt_on=False)
print(results[1])
frc_in = results[0]
print(frc_in)
print(frc_in.sum())
print(data.sum())
| [
"[email protected]"
] | |
360826c9d54c53412f463d98e4c247151a877b01 | 16d159d6d3fe69d513717caad3e2c21320f93224 | /AtCoder/ABC/ABC101-150/abc141/abc141b.py | 23b49e7105c52df065df47863ad2a802a189189d | [] | no_license | tsushiy/competitive-programming-submissions | d4f068a5157c0de0f1822367e0ca66dd978e43f9 | 9011d855d9252134179cc9cc8f328f6e0ca32407 | refs/heads/master | 2023-04-11T08:34:01.015316 | 2021-04-11T15:16:17 | 2021-04-11T15:17:35 | 175,807,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | s = input()
flag = True
for i, e in enumerate(s):
if i%2==0 and e not in "RUD":
flag = False
elif i%2==1 and e not in "LUD":
flag = False
if flag:
print("Yes")
else:
print("No") | [
"[email protected]"
] | |
6e6e1991a34c11354f69814d4470d27f07744b55 | 8e02228b5857d876de244d37b1d6ec3bbd21227e | /python_modules/dagit/dagit_tests/test_debug_cli.py | 8131baa4613455e33bad678940c7646f893f743e | [
"Apache-2.0"
] | permissive | ChocoletMousse/dagster | 4af255d5ab99c25d1a2be379a5bbd83fa3221b64 | a256cb43cde0ab5a800a87ee1f55de560587a4ab | refs/heads/master | 2022-12-26T00:04:31.172150 | 2020-09-25T21:36:15 | 2020-09-26T02:15:35 | 297,765,844 | 1 | 0 | Apache-2.0 | 2020-09-22T20:35:23 | 2020-09-22T20:35:22 | null | UTF-8 | Python | false | false | 1,226 | py | from os import path
from click.testing import CliRunner
from dagit.debug import dagit_debug_command
from gevent import pywsgi
from dagster import execute_pipeline, lambda_solid, pipeline
from dagster.cli.debug import export_command
from dagster.core.test_utils import instance_for_test
@lambda_solid
def emit_one():
return 1
@pipeline
def test_pipe():
emit_one()
emit_one()
def test_roundtrip(monkeypatch):
runner = CliRunner()
with instance_for_test() as instance:
run_result = execute_pipeline(test_pipe, instance=instance)
assert run_result.success
file_path = path.join(instance.root_directory, ".temp.dump")
export_result = runner.invoke(export_command, [run_result.run_id, file_path])
assert "Exporting run_id" in export_result.output
assert file_path in export_result.output
# make dagit stop after launch
monkeypatch.setattr(pywsgi.WSGIServer, "serve_forever", lambda _: None)
debug_result = runner.invoke(dagit_debug_command, [file_path])
assert file_path in debug_result.output
assert "run_id: {}".format(run_result.run_id) in debug_result.output
assert "Serving on" in debug_result.output
| [
"[email protected]"
] | |
61b8dbbbc2d89a236263828285aac5e64b2dec48 | e8d719fe45dfbff9cbbc4ed872832cec6cabaca6 | /128_Longest_Consecutive_Sequence.py | 1ccccaf698c6be4b9c5f01f29d570fac2016dac6 | [] | no_license | nlfox/leetcode | 64f4f48d7f4be6df0542e51cc7037df40bf184a3 | d61363f99de3d591ebc8cd94f62544a31a026d55 | refs/heads/master | 2020-12-21T01:43:01.792899 | 2016-11-14T23:10:12 | 2016-11-14T23:10:12 | 56,680,839 | 2 | 0 | null | 2016-05-17T17:16:37 | 2016-04-20T11:19:58 | Python | UTF-8 | Python | false | false | 500 | py | class Solution(object):
def longestConsecutive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums=list(set(nums))
nums.sort()
maxCnt = 1
cnt = 1
for i in xrange(1, len(nums)):
if nums[i] - 1 == nums[i - 1]:
cnt += 1
else:
cnt = 1
if cnt > maxCnt:
maxCnt = cnt
return maxCnt
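# The sorting approach above is O(n log n). Below is a hedged sketch (not part of the
# original submission) of the standard O(n) set-based alternative: only values that
# start a run (i.e. v-1 absent from the set) trigger a forward walk.
def longest_consecutive_linear(nums):
    values = set(nums)
    best = 0
    for v in values:
        if v - 1 not in values:  # v starts a consecutive run
            length = 1
            while v + length in values:
                length += 1
            best = max(best, length)
    return best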
print Solution().longestConsecutive([1,2,0,1])
| [
"[email protected]"
] | |
08a6fe350bd23f142bad00a4690285eba2230d71 | a9dc497a723917d4256ef15b2e9c3cf88a3fae4f | /GPinv/mean_functions.py | e2f7bd94404647f0cc8161fe0941bcca89455f3f | [
"Apache-2.0"
] | permissive | chaoshunh/GPinv | 83b8773870f7ccc8a636fc88ac9a96acfa306c54 | e46964991459cb43752cd344c18be0e197d439f8 | refs/heads/master | 2023-03-16T06:42:39.966449 | 2016-10-15T05:39:58 | 2016-10-15T05:39:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,458 | py | import tensorflow as tf
import numpy as np
from GPflow import mean_functions
from GPflow.param import Param,ParamList
from GPflow._settings import settings
float_type = settings.dtypes.float_type
np_float_type = np.float32 if float_type is tf.float32 else np.float64
class MeanFunction(mean_functions.MeanFunction):
"""
A wrap of GPflow.mean_functions.MeanFunction.
The main difference of this wrap is __call__ method, that returns
nxR sized tensor, in contrast to GPflow.mean_functions.MeanFunction, which
returns nx1 sized tensor.
"""
def __init__(self, output_dim):
"""
:param integer output_dim: number of output dimension, R.
"""
mean_functions.MeanFunction.__init__(self)
self.output_dim = output_dim
def __call__(self, X):
"""
:param tf.tensor x: nxD tensor.
:return tf.tensor: nxR tensor.
"""
raise NotImplementedError("Implement the __call__\
method for this mean function")
class Zero(MeanFunction):
""" Zero mean """
def __call__(self, X):
return tf.zeros([tf.shape(X)[0],self.output_dim], float_type)
class Constant(MeanFunction):
""" Constant mean """
def __init__(self, output_dim, c=None):
MeanFunction.__init__(self, output_dim)
if c is None:
c = np.ones(output_dim,np_float_type)
self.c = Param(c)
def __call__(self, X):
return tf.tile(tf.expand_dims(self.c,0), [tf.shape(X)[0],1])
class Stack(MeanFunction):
"""
Mean function that returns multiple kinds of mean values, stacked
vertically.
Input for the initializer is a list of MeanFunctions, [m_1,m_2,...,m_M].
The function call returns [m_1(X),m_2(X),...,m_M(X)].
The size of the return is n x (sum_i m_i.output_dim).
"""
def __init__(self, list_of_means):
"""
:param list list_of_means: A list of MeanFunction object.
"""
output_dim = 0
for m in list_of_means:
output_dim += m.output_dim
MeanFunction.__init__(self, output_dim)
# MeanFunctions are stored as ParamList
self.mean_list = ParamList(list_of_means)
def __call__(self, X):
"""
Return a concatenated tensor of the multiple mean functions.
The size of the return is n x (sum_i m_i.output_dim).
"""
return tf.concat(1, [l(X) for l in self.mean_list])
| [
"[email protected]"
] | |
5e6cdf4edc9dc9c9d2f8fb40591b1ad2f813a64d | 05805ab879654cdcf61df3653847f435b624dc77 | /Dictator_service/bin_gui/main_class_based_backup.py~ | 45eccae17af540925ac58c58102ba94e6188ffab | [] | no_license | Wuwqhnsya/Dictator | 3d57db6bc0138464884ddc9fe7378907ab86e3ef | 45388fec03a4acdac3620611b3bccfa3c991d65f | refs/heads/master | 2020-04-28T21:57:39.309165 | 2019-01-28T19:10:28 | 2019-01-28T19:10:28 | 175,600,478 | 1 | 0 | null | 2019-03-14T10:34:02 | 2019-03-14T10:34:02 | null | UTF-8 | Python | false | false | 31,361 | #!/usr/bin/python
import time
import threading
import time
import nmap
import multiprocessing
import os
import sys
import ConfigParser
#import mysql.connector
import MySQLdb
import atexit
import IPtable
import texttable as tt
import Simple_Logger
r = '\033[31m' #red
b = '\033[34m' #blue
g = '\033[32m' #green
y = '\033[33m' #yellow
m = '\033[35m' #magenta
c = '\033[36m' #cyan
e = '\033[0m' #end
def test():
print "\n\n\n Exiting Bye Bye !!!"
atexit.register(test)
class NmapScan:
def __init__(self):
self.IP=""
self.PORT=None
self.SWITCH=""
self.CURRENT_PROJECT_ID=""
self.takescan=""
self.N=4
self.Port_Divisior=7500
self.Pause_Flag=False
self.Stop_Flag=False
self.ipcount=0
self.IPtable=IPtable.IPtable()
self.method_id="INIT"
self.Thread_pool=[]
self.retry_count=0
self.max_retries=3
self.simple_logger=Simple_Logger.SimpleLogger()
self.lock=threading.Lock()
self.folder_name=os.path.join("Results","Data_")
	def generate_Error_log(self,status,ipx,portx,pid):
try:
print "Logged exception"
'''self.data_path=self.folder_name+str(self.pid)
error_file=str(project_id)+"_error.txt"
error_file_path = os.path.join(self.data_path, error_file)
self.lock.acquire()
simple_logger.log(error_file_path,"Error -->,Status:Error Complete,Host :"+str(ipx)+",Port:"+str(portx)+",Project id :"+str(pid)+"\n")
self.lock.release()'''
except Exception ,ee:
print "Exception while writing to error file :"+str(ee)
def portscanner(self,ipx,portx): #switch,current_project_id
nm=nmap.PortScanner()
try:
if portx=="top_ports":
nm.scan(ipx,None,self.SWITCH)
else:
nm.scan(ipx,portx,self.SWITCH)
except Exception ,ex:
self.seperator()
print r+"\n\nEXCEPTION in nmap built in utiliry--> "+str(ex) +e
self.seperator()
self.seperator()
print g+"\n\nRe-attempts made on this record :"+str(self.retry_count)+e
self.seperator()
self.retry_count =self.retry_count+1
if (self.retry_count < self.max_retries):
print g+"\n\nRe-attemting for the failed record"+e
self.IPtable.UpdateStatus('incomplete',ipx,portx,int(self.CURRENT_PROJECT_ID))
else:
print g+"\n\nMax re attempts exceeded - Updating status to ERror-complete"+e
print r+"\n\nPlease see the error log for further details.IT would mention the host for which the nmap module failed"+e
self.IPtable.UpdateStatus('error-complete',ipx,portx,int(self.CURRENT_PROJECT_ID))
self.generate_Error_log('error-complete',ipx,portx,int(self.CURRENT_PROJECT_ID))
return 0
try:
temp=nm.scanstats()['uphosts']
if (int(temp) != 0):
host=ipx
if 'tcp' in nm[host].all_protocols():
self.seperator()
print "Result for IP : " + host
print('Protocol : TCP' )
for kk in nm[host]['tcp'].keys():
if (nm[host]['tcp'][kk]['name'])=='':
nm[host]['tcp'][kk]['name']='unknown'
lport = nm[ipx]['tcp'].keys()
lport.sort()
for port in lport:
print b+'port : ' +y+str(port) + ' \t ' + g+ nm[host]['tcp'][port]['state'] +' \t' +r +'' + nm[host]['tcp'][port]['name'] +e
self.seperator()
sd=nm.csv()
#print "Reached at update point "
try :
self.IPtable.Update(sd,portx,ipx,int(self.CURRENT_PROJECT_ID))
except Exception ,ee :
self.print_Log("Exception in update "+str(ee))
print "EXception Update main "+str(ee)
if 'udp' in nm[host].all_protocols():
self.seperator()
#self.IPtable.Update(sd,portx,ipx,int(self.CURRENT_PROJECT_ID))
print "Result for IP : " + host
print('Protocol : UDP' )
lport = nm[ipx]['udp'].keys()
lport.sort()
					for kk in nm[host]['udp'].keys():
						if (nm[host]['udp'][kk]['name'])=='':
							nm[host]['udp'][kk]['name']='unknown'
for port in lport:
print b+'port : ' +y+str(port) + ' \t ' + g+ nm[host]['udp'][port]['state'] +' \t' +r +'' + nm[host]['udp'][port]['name'] +e
self.seperator()
sd=nm.csv()
try :
self.IPtable.Update(sd,portx,ipx,int(self.CURRENT_PROJECT_ID))
except Exception ,ee :
print "EXception Update main "+str(ee)
self.print_Log("Exception in update "+str(ee))
status="complete"
#print "\n\n\n!!!Completed!!! Ip : "+ipx+"\n\n\n -Protocols ---> "+str(nm[host].all_protocols())+"\n\n"
try :
self.IPtable.UpdateStatus(status,ipx,portx,int(self.CURRENT_PROJECT_ID))
except Exception ,ee :
self.print_Log("Exception in update status "+str(ee))
else:
statuss="host-down"
try :
self.IPtable.UpdateStatus(statuss,ipx,portx,int(self.CURRENT_PROJECT_ID))
except Exception ,ee :
self.print_Log("Exception in update status host-down "+str(ee))
except Exception,exc:
self.print_Log("Parent exception : "+str(exc))
def ThreadEnd(self,ipl):
print "\n\nThread ended with host ip -"+str(ipl)+"\n\n"
#startProcessing(1)
def simplescanner(self,ipl):
self.method_id="Simple scanner"
self.print_Log("Started Simple acanner")
stport=0
lsport=0
port_list=[]
process_list=[]
try :
port_list=self.IPtable.getPorts(str(ipl),self.CURRENT_PROJECT_ID)
if(port_list):
for port in port_list:
fport=str(port[0]) #fport=1 -5001
#print "\n\nFport is :"+fport +" IP :" +str(ipl) +"id :" +str(self.CURRENT_PROJECT_ID)
time.sleep(10)
try :
self.IPtable.UpdateStatus('processing',ipl,fport,int(self.CURRENT_PROJECT_ID))
except Exception, ee:
print "EXception 13.01 : " +str(ee)
tp=multiprocessing.Process(target=self.portscanner,args=(ipl,fport)) #
process_list.append(tp)
tp.start()
#print "\n\nStarted subprocess for ip " +str(ipl) +" and port "+ str(port) +" and Process : "+str(tp)
for process_ in process_list:
process_.join()
print "\n\n Finished subprocess for ip " +str(ipl) +" and Process : "+str(process_)
else:
#print "The current ip address has all its ports scanned -->Must have not been there" +str(ipl)
self.print_Log("Some exception-->The current ip address has all its ports scanned -->Must have not been there" +str(ipl))
self.print_Log("Ended Simple acanner")
except Exception ,ee:
print "EXception 11" +str(ee)
self.print_Log("Exception inSimpleScanner-->"+str(ee))
self.ThreadEnd(ipl)
def topport_scan(self,ipls,portl): #this would be invoked if the given port list would be empty such that only the top ports would be scanned
tp=multiprocessing.Process(target=portscanner,args=(ipls,"top_ports"))
tp.start()
tp.join()
def getBulkInsertList_(self,start,end,iplist):
#print "About to make bulk enteries - #Ip:"+ str(len(iplist) )
BulkList=[]
counter=1
#global P
for ip in iplist:
x=int(start)
pnum=end-start+1 #First port number in the sequence say 1-10023 is the range ->pnum =10023
r=pnum%self.Port_Divisior #r = 10023 % 5000 --> r=23
q=pnum//self.Port_Divisior # Floor division ->q=quetient= 10023/5000 => 2.004 ,since floor ,thus q=2
check=q*self.Port_Divisior #check =2*5000 =>10,000
#x=int(start) #x=1
ip_list=[]
while check>0: #(1) check=10000 >0 (2) check=5000 > 0
for tport in range(x,x+self.Port_Divisior,self.Port_Divisior):
fport=str(tport)+'-' +str(tport+self.Port_Divisior) #fport=1 -5001
BulkList.append((self.CURRENT_PROJECT_ID,ip,fport,'incomplete'))
x=x+self.Port_Divisior
check=check-self.Port_Divisior # (A) 1 --> check=5000 , (B) 1 --> check =0
counter=counter+1
#By this time 1-10,000 ports would be scanned .The idea is to scan 5000 ports at 1 time.
#The number of ports left are 23
check=q*self.Port_Divisior #check =10,000
#print "\n\n\n\n check is "+str(check )+" Pnum is "+str(pnum)+"\n\n\n\n"
if check < end :
if pnum!=0 : #pnum=10023
print "Scanning remaining ports"
prange=str(start+check)+"-"+str(start+check+r-1) #prange= (100001-10,0023) -->Thus the remaining 23 ports are ranged out
print "Range is :"+ prange+"\n\n\n"
BulkList.append((self.CURRENT_PROJECT_ID,ip,prange,'incomplete'))
print "\n\nLoop executed : "+str(counter)
return BulkList;
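	# Worked example (illustrative): with Port_Divisior = 7500, a requested range of
	# 1-10023 for a host is stored as the two chunks '1-7501' and '7501-10023', each
	# as its own 'incomplete' row, so chunks can be scanned and resumed independently.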
def getBulkInsertList(self,all_ports,iplist):
print "(1)--About to make bulk enteries - #Ip:"+ str(len(iplist))
BulkList=[]
if (all_ports == None) :
print "in if(1)"
all_Ports_="top_ports"
for ip in iplist:
BulkList.append((self.CURRENT_PROJECT_ID,ip,all_Ports_,'incomplete'))
elif "-" in all_ports:
print "in elif(1)"
tlist=all_ports.split('-') #Split them and the list would be stored in variable named tlist
stport=int(tlist[0]) #First port
lsport=int(tlist[1])
if ((lsport-stport)< 5000):
for ip in iplist:
BulkList.append((self.CURRENT_PROJECT_ID,ip,all_ports,'incomplete'))
else :
BulkList=self.getBulkInsertList_(stport,lsport,iplist)
else :
print "in else"
for ip in iplist:
BulkList.append((self.CURRENT_PROJECT_ID,ip,all_ports,'incomplete'))
#print "\n\nBulk List is \n\n"
#print BulkList
return BulkList
def multiscan(self,start,end,ipls): #This would be invokd when the number of ports per host to be scanned exceed 5000
pnum=end-start+1 #First port number in the sequence say 1-10023 is the range ->pnum =10023
r=pnum%5000 #r = 10023 % 5000 --> r=23
q=pnum//5000 # Floor division ->q=quetient= 10023/5000 => 2.004 ,since floor ,thus q=2
check=q*5000 #check =2*5000 =>10,000
x=int(start) #x=1
while check>0: #(1) check=10000 >0 (2) check=5000 > 0
for tport in range(x,x+5000,5000):
fport=str(tport)+'-' +str(tport+5000) #fport=1 -5001
				tp=multiprocessing.Process(target=self.portscanner,args=(ipls,fport))
tp.start()
#tp.join()
x=x+5000 # (A) 1 --> x=5001 -->It will break from this loop (B) 1 --> x=10,001 -->it shall break the loop
# print "Scan from " + str(tport) + " till " + str(tport+5000)+ " Done"
check=check-5000 # (A) 1 --> check=5000 , (B) 1 --> check =0
#By this time 1-10,000 ports would be scanned .The idea is to scan 5000 ports at 1 time.
#The number of ports left are 23
check=q*5000 #check =10,000
if pnum!=0: #pnum=10023
# print "Scanning remaining ports"
prange=str(start+check)+"-"+str(start+check+r-1) #prange= (100001-10,0023) -->Thus the remaining 23 ports are ranged out
# print prange
			tp=multiprocessing.Process(target=self.portscanner,args=(ipls,prange)) #Finally invoking the code portscanner for remaining 23 ports with range (10,001 -10,023)
tp.start()
#tp.join()
def singlescan(self,start,end,ipls):
#print "Single Scan"
prange=str(start)+"-"+str(end)
		tp=multiprocessing.Process(target=self.portscanner,args=(ipls,prange))
tp.start()
tp.join()
def numofips(self,iprange): #Converts CIDR notation as simple list
scanner=nmap.PortScanner()
IPlist=scanner.listscan(iprange)
		return IPlist #Thus this would be a list of IP addresses
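		# e.g. a CIDR input such as "192.168.1.0/30" is expanded by nmap's list scan
		# into ['192.168.1.0', '192.168.1.1', '192.168.1.2', '192.168.1.3'].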
def banner(self,):
print g+" ################################################################# "+e
print g+" ###"+r+" __ "+g+"### "+e
print g+" ###"+r+" /\ \ \_ __ ___ __ _ _ __ "+g+"### "+e
print g+" ###"+r+" / \/ / '_ ` _ \ / _` | '_ \ "+g+"### "+e
print g+" ###"+r+"/ /\ /| | | | | | (_| | |_) | "+g+"### "+e
print g+" ###"+r+"\_\ \/ |_| |_| |_|\__,_| .__/ "+g+"### "+e
print g+" ###"+r+" |_| "+g+"### "+e
print g+" ###"+r+" _ _ "+g+"### "+e
print g+" ###"+r+" /_\ _ _| |_ ___ _ __ ___ __ _| |_(_) ___ _ __ "+g+"### "+e
print g+" ###"+r+" //_\\| | | | __/ _ \| '_ ` _ \ / _` | __| |/ _ \| '_ \ "+g+"### "+e
print g+" ###"+r+"/ _ \ |_| | || (_) | | | | | | (_| | |_| | (_) | | | | "+g+"### "+e
print g+" ###"+r+"\_/ \_/\__,_|\__\___/|_| |_| |_|\__,_|\__|_|\___/|_| |_| "+g+"### "+e
print g+" ###"+r+" "+g+"### "+e
print g+" ###"+r+" __ _ _ "+g+"### "+e
print g+" ###"+r+"/ _\ ___ _ __(_)_ __ | |_ "+g+"### "+e
print g+" ###"+r+"\ \ / __| '__| | '_ \| __| "+g+"### "+e
print g+" ###"+r+"_\ \ (__| | | | |_) | |_ "+g+"### "+e
print g+" ###"+r+"\__/\___|_| |_| .__/ \__| "+g+"### "+e
print g+" ###"+r+" |_| "+g+"### "+e
print g+" ###"+b+" Written by: M$P@T3L "+g+"### "+e
print g+" ################################################################# "+e
def seperator(self):
print r+ "----------------------------------------------" +e
def create_schema(self):
with open(schema_file, 'rt') as f:
schema = f.read()
conn.executescript(schema)
def prompt_project(self):
projectname=raw_input(b+"What is your Project name(no white spaces)? \n>"+y)
return projectname
def prompt_ips(self):
ips=raw_input(b+"Type the IP range: \n>"+y)
IP=ips
return ips
def prompt_ports(self):
ports=raw_input(b+"Enter the Port number or Ports range: \n>"+y)
#global PORT
if ports == "":
self.PORT=None
elif(ports=="*"):
self.PORT="1-65535"
else:
self.PORT=ports
return self.PORT
def print_Log(self,message):
print str(message)
def print_Error(self,message):
print str(message)
def db_projectname(self,projectname_db,IP_range,Port_range): # Store the project name and return the auto generated id
self.method_id="db_projectname"
self.print_Log("Method started")
print "Hello"
time.sleep(10)
try :
pid=self.IPtable.Insert(projectname_db,IP_range,Port_range)
if (pid !=-1):
self.CURRENT_PROJECT_ID=pid
else:
self.print_Log("Some error occured while storing !!" +str(pid))
self.print_Log("Method ended")
except Exception ,ee :
self.print_Error( "Exception in db_projectname "+str(ee))
#print self.CURRENT_PROJECT_ID
#print cursor.lastrowid
def scanbanner(self):
cp=ConfigParser.RawConfigParser() #parses config files
cppath="nmap.cfg" #This is the config file to be read.The config file would have various sections.Each section would be in [sq] beakets.each section would be having key/val pairs as conf setting options
cp.read(cppath) #Read the current file nmap.cfg.The file has got only 1 section given as :[Scantype]
#global self.SWITCH
#global self.takescan
print b+"SELECT THE TYPE OF SCAN: "
self.seperator()
print y+"1). Intense Scan"
print "2). Intense + UDP Scan"
print "3). Intense + TCP full Scan"
print "4). Intense + No Ping Scan"
print "5). TCP Ping Scan"
print "6). PCI Ping Sweep"
print "7). PCI full ports TCP"
print "8). PCI Top 200 UDP"
print "9). PCI Top 100 UDP"
print "10). PCI Top 1000 TCP"
self.takescan=raw_input(b+"Select the type of Scan:\n>"+y)
if self.takescan=="1":
self.SWITCH=cp.get('Scantype','Intense')
elif self.takescan == "2":
self.SWITCH=cp.get('Scantype','Intense_UDP') #-sU -T4 -A -n
elif self.takescan == "3":
self.SWITCH=cp.get('Scantype','Intense_TCPall') #-sS -T4 -A -n--max-rtt-timeout 500ms
elif self.takescan == "4":
self.SWITCH=cp.get('Scantype','Intense_NoPing') #T4 -A -v -Pn -n
elif self.takescan == "5":
self.SWITCH=cp.get('Scantype','Ping') #-PS
elif self.takescan == "6":
self.SWITCH=cp.get('Scantype','PCI_Ping_Sweep') #-PE -n -oA
elif self.takescan == "7":
self.SWITCH=cp.get('Scantype','PCI_Full_ports_TCP') #-Pn -sS -sV -n --max-retries 3 --max-rtt-timeout 1000ms --top-ports 1000
elif self.takescan == "8":
self.SWITCH=cp.get('Scantype','PCI_Top_200_UDP') #-Pn -sU -sV -n --max-retries 3 --max-rtt-timeout 100ms --top-ports 200
elif self.takescan == "9":
self.SWITCH=cp.get('Scantype','PCI_Top_100_UDP') #-Pn -sU -sV -n --max-retries 3 --max-rtt-timeout 100ms --top-ports 100
elif self.takescan == "10":
self.SWITCH=cp.get('Scantype','PCI_Top_1000_TCP') #-Pn -sS -sV -n --max-retries 3 --max-rtt-timeout 500ms
else:
print "Invalid value supplied"
print "Using Default(1)"
self.SWITCH=cp.get('Scantype','Intense')
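# Illustrative nmap.cfg layout assumed by the ConfigParser lookups above. The
# values shown are examples taken from the inline switch comments, not an
# authoritative copy of the shipped config:
#   [Scantype]
#   Intense = -T4 -A -v
#   Intense_UDP = -sU -T4 -A -n
#   PCI_Full_ports_TCP = -Pn -sS -sV -n --max-retries 3 --max-rtt-timeout 1000ms --top-ports 1000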
def prompt_ProjectID(self): #would prompt the user with paused projects -->status=incomplete or paused in projects table
print "\n"
tab = tt.Texttable()
x = [[]] #multi dimension array
cursor=self.IPtable.getPausedScans()
if cursor:
print r+"List of Project with IDs"+e +"\n"
for row in cursor:
x.append([str(row[0]),str(row[1])]) #Place details in the array to display later
tab.add_rows(x) #thus the table would have all rows and 2 columns
tab.set_cols_align(['r','r'])
tab.header(['IDs','PROJECT_NAME']) #setting heder details for col
print tab.draw() #this would draw the table on the console
print "\n"
id_ = raw_input(b+"Enter The Project Id For Scanning :"+e)
try :
if(int(id_)):
return id_
except :
print "Exception 6-->Invalid Value"
return ""
else:
print "\n\nNo incomplete Projects\n\n";
time.sleep(1);
self.main()
def prompt_ScanType(self):
scanType=raw_input(b+"Enter Your choice: \n"+y +"\n(1) For Launching New Scan \n(2) For Launching Paused Scans\n "+e)
try:
if((int(scanType)<1)or(int(scanType) >2)):
return 1;
else :
return scanType;
except :
return 1;
def getHostPort(self,project_id):
try:
self.method_id="getHostPort()-->main"
self.print_Log("Started")
project_data=[]
project_data=self.IPtable.getHostPort(project_id)
self.method_id="getHostPort()-->main"
self.print_Log("Ended")
return project_data
except Exception ,ee:
print "Exception 14" +str(ee)
self.print_Error("Exception --getHostPort--"+str(ee))
return 0;
def launch_PausedScan(self,project_id):
print "Reached Here in Launch Paused Scan !!!\n";
self.method_id="LaunchPausedScan()"
self.print_Log( "Started Launch Paused ")
success=self.IPtable.MakeUpdate(project_id)
if(success==1):
self.startProcessing(self.N)
elif(success==2): #when it was paused before the bulk entries were made
port_host=self.getHostPort(project_id)
if(port_host):
ip_range=port_host[0]
port_range=port_host[1]
listip=self.numofips(ip_range)
BulkEntries=self.makeBulkEnteries(listip,port_range)
#global N
self.startProcessing(self.N)
else:
print "The given project id is not present in Database :-->Kindly recheck "
self.print_Log("The given project id is not present in Database :-->Kindly recheck ")
else:
print "\n\nThe update method for status= incomplete has exception \n\n"
self.print_Log("The update method for status= incomplete has exception ")
def stop_all(self):
os._exit(0)  # os._exit() requires an explicit exit status
def makeBulkEnteries(self,all_hosts,all_ports):
#print "In here !!1"
self.method_id="makeBulkEntries()"
self.print_Log("Started")
BulkList=[]
if 1:
BulkList=self.getBulkInsertList(all_ports,all_hosts)
self.method_id="makeBulkEntries()"
self.method_id="makeBulkEntries"
try:
status=self.IPtable.InsertAll(BulkList)
self.method_id="makeBulkEntries()"
if (status != 1):
print "Some error occurred during bulk insertion"
except Exception ,ee :
print "EXception 9 "+str(ee)
self.print_Error("EXception make Bulk entries --> "+str(ee))
self.print_Log("Ended")
return BulkList;
def getAllDistinctHosts(self,n):
try :
self.method_id="getAllDistinctHost()"
self.print_Log("started")
iplist=[]
iplist=self.IPtable.DistinctHosts(self.CURRENT_PROJECT_ID,int(n))
self.method_id="getAllDistinctHost()"
self.print_Log("Ended")
return iplist
except Exception ,ee :
print "Exception 10 " +str (ee)
self.print_Error("Exception "+str(ee))
return 0
def start_Polling(self):
try:
stop_db_poll=False #use this logic to stop unnecessary db poll when all hosts finish
#global N
while 1:
time.sleep(5)
active_threads=threading.enumerate()
counter=len(active_threads)
self.seperator()  # seperator() prints its own line; printing its return value would emit a stray "None"
print "Polling \n Threads remaining are :"+str(active_threads)+"\n"
self.seperator()
# Initially every record has status 'incomplete'; the parent thread is this polling
# thread, and the worker threads it starts flip records to 'processing'. A worker may
# be started but not yet scheduled while this loop runs asynchronously, so the thread
# count can read 1 (main thread only) while records still show 'incomplete' rather
# than 'processing'. Keeping that in mind, it is important to check here that when the
# thread count is 1 there is no record left with status 'incomplete' or 'processing'.
# If the user has intentionally paused the scan, the project table shows 'paused' and
# the IP table may hold both 'processing' and 'incomplete' entries; that case is
# ignored and the scan is allowed to come to an end.
if(counter==1):
status=self.IPtable.checkStatus(self.CURRENT_PROJECT_ID)
if(status):
processing_status=status[0]
pause_status=status[1]
if((processing_status) and (not (pause_status))):#will just check once
print "Still left with some hosts that display status as processing or incomplete "
time.sleep(10)#the reason for this delay: a thread may be fired but not scheduled yet, so its record still shows incomplete; if we immediately called startProcessing, two threads might point at one record
self.startProcessing(self.N)
#print "Main Thread--->Again Starting pooling in 50 sec :"
time.sleep(50)
else:
print "Active Threads are only 1 --Scan about to finish --Threads remaining are :"+str(active_threads)
self.print_Log("Active Threads are only 1 --Scan about to finish --Threads remaining are :"+str(active_threads))
break;
#include logic to stop unnecessary polling see count (*) where status=p if that=limit then dont poll
elif(counter <=(self.N+1)):
if(not(self.getPausedStatus(self.CURRENT_PROJECT_ID))):
limit=(self.N+1)-counter
if(limit != 0):
#print "\n\nLaunching :"+str(limit)+" Threads for hosts"
left_hosts=self.startProcessing(limit) #chk if its 0 then break or dont poll till current th fn
#print "Making main thread sleep for 1 seconds"
time.sleep(1)
#print "Waking main thread awake after 1 seconds"
else:
#print "Making main thread sleep for 1 seconds"
time.sleep(1)
#print "Waking main thread awake after 1 seconds"
else:
time.sleep(10)
else :
print "\n\n\n\n------FATAL ERROR-------\n\n\n"
print "Number of threads can't exceed : "+str(self.N+1)
except Exception ,ee:
print "Exception caught 15" +str(ee)
def StartThreads(self,hosts):
#print "\n In start thread method !!! \n"
self.method_id="Start THreads"
threads=[]
#print "Starting : "+str(len(hosts)) +"Threads for "+ str(hosts) +"Hosts :"
print "\n"
self.seperator()
self.print_Log("Starting : "+str(len(hosts)) +"Threads for "+ str(hosts) +"Hosts" )
self.seperator()
print "\n"
for host in hosts:
#print "host is "+str(host)
lk= threading.enumerate()
#print "\n Current thread count : "+str(len(lk))
#print "\n\nThe threads enumerate returned are : " +str(lk) +"\n\n"
self.print_Log(g+"******************************************************************************************************************************************\n"+e+"Current thread count : "+str(len(lk)))
self.print_Log("The threads enumerate returned are : " +str(lk)+g+"\n******************************************************************************************************************************************"+e)
if len(lk)<(self.N+1) :
currentIP= str(host)
obj=NmapScan()
obj.IP=self.IP
obj.PORT=self.PORT
obj.SWITCH=self.SWITCH
obj.CURRENT_PROJECT_ID=self.CURRENT_PROJECT_ID
obj.takescan=self.takescan
obj.N=self.N
obj.Port_Divisior=self.Port_Divisior
obj.Pause_Flag=self.Pause_Flag
obj.Stop_Flag=self.Stop_Flag
obj.ipcount=self.ipcount
obj.IPtable=IPtable.IPtable()
obj.simple_logger=self.simple_logger
#self.method_id="INIT"
t = threading.Thread(target=obj.simplescanner, args=([currentIP]))
threads.append(t)
#print "Starting thread for IP :"+str(host)
#self.print_Log("Starting thread for IP :"+str(host))
t.start()
self.Thread_pool.append(t)
#print "\n\n\nStarted thread for IP :"+str(host) + " --> Thread is : "+ str(t)
self.print_Log( "\nStarted thread for IP :"+str(host) + " --> Thread is : "+ str(t))
time.sleep(3)
def startProcessing(self,n):
try :
All_hosts=self.getAllDistinctHosts(n)
#print "Hosts to be given to thread : "+str(All_hosts)
if (All_hosts):
self.StartThreads(All_hosts)
else :
return;
except Exception ,ee :
print "Exception 12 " +str(ee)
def getPausedStatus(self,project_id):
try :
status=self.IPtable.getStatus(project_id)
return status
except Exception ,ee:
print "Exception getstatus " +str(ee)
return 0
def pause_scan(self):
global Pause
Pause =1
self.stop_all();  # stop_all is a method on this class
def main(self,path='',targethosts='',targetports='',switch='',scan_type='',mode="c",project_id='',assessment_id='',app_id=''):
if (scan_type=="1"):
self.SWITCH=switch
self.PORT=targetports
print "The mode received is :" +str(mode)
if(mode=="c"):
self.db_projectname(path,targethosts,self.PORT)
self.seperator()
elif mode =="g-init":
if assessment_id =='':
return;
else:
self.db_projectname(path,targethosts,self.PORT)
self.IPtable.update_mapping(app_id,self.CURRENT_PROJECT_ID,assessment_id)
return self.CURRENT_PROJECT_ID
elif mode=="g-start":
self.CURRENT_PROJECT_ID=int(project_id)
x=333#gui mode
print b +"[+]" + "Starting SCAN" +e
#targethosts=['10.0.1.39','10.0.1.39','10.0.1.39','10.0.1.39']
ipcount=len(self.numofips(targethosts))
if (',' in targethosts):
listip=targethosts.split(',')
else:
listip=self.numofips(targethosts)
BulkEntries=self.makeBulkEnteries(listip,self.PORT)
#global N
self.startProcessing(self.N) #this is the part where the prompt input finishes
#print "Main Thread Starting polling in 50 sec :"
time.sleep(100)
# "**Polling started **\n"
self.method_id="Main()"
self.print_Log("**Polling started :**")
self.start_Polling()
#print "\n\n\n\n\nScan Finished\n\n\n\n\n "
else:
#global self.CURRENT_PROJECT_ID
if (mode=="c"):
self.CURRENT_PROJECT_ID=self.prompt_ProjectID()
else:
self.CURRENT_PROJECT_ID=int(project_id)
if (self.CURRENT_PROJECT_ID != ""):
self.launch_PausedScan(self.CURRENT_PROJECT_ID)
print "\n\nMain thread starting Polling .........\n\n"
print "Main Thread Starting polling in 10 sec :"
time.sleep(100)
print "Polling started :"
self.start_Polling()
def driver_main(self,ips='',project_name='',port='',scan_type='',switch='',project_id='',mode="c",assessment_id="",app_id=""):
try:
print ("("+ips,project_name,port,scan_type,switch,project_id,mode,assessment_id,app_id+")")
print "\n\n Hello world \n\n"
time.sleep(10)
start = time.time()
os.system('cls' if os.name == 'nt' else 'clear')
db_filename="nmapscan"
start = time.time()
#self.main()
#mode="c"path='',targethosts='',targetports='',switch='',scan_type='',mode="c",project_id=''):
self.main(project_name,ips,port,switch,scan_type,mode,project_id,assessment_id,app_id)
print "Reached here as well !!!"
if mode != "g-init" :
th_count=threading.enumerate()
print "# of threads Alive are :"+str(len(th_count))
#while (1) :
if 1:
if (len(th_count)==1):
print "\nNow stopping and saving Global Project Id : "+ str(self.CURRENT_PROJECT_ID)+"\n";
#global self.CURRENT_PROJECT_ID
if ((self.CURRENT_PROJECT_ID != "") and (self.CURRENT_PROJECT_ID is not None)):
status=self.IPtable.checkStatus(self.CURRENT_PROJECT_ID) # see the thread-count/status explanation in start_Polling() above
if(status):
processing_status=status[0]
pause_status=status[1]
if((processing_status) and (not (pause_status))):#will just check once
print "Still left with some hosts that display status as processing !!!"
time.sleep(10)#the reason for this delay: a thread may be fired but not scheduled yet, so its record still shows incomplete; if we immediately called startProcessing, two threads might point at one record
self.startProcessing(self.N)
print "Main Thread--->Again Starting polling in 50 sec :"
time.sleep(50)
print "Polling started-->again :"
self.start_Polling()
#xx=2
if ((not(processing_status)) and (not(pause_status))): #to update status from incompl to comp
print "Launching clear logs !!!"
self.IPtable.clearLogs(self.CURRENT_PROJECT_ID,'complete')
#else :
#clearLogs(self.CURRENT_PROJECT_ID,'complete')
end_time = time.time()
print "Time taken in seconds : "+str(end_time-start)
elif mode =="g-init":
print "\n\nPROPER\n\n"
return self.CURRENT_PROJECT_ID
except KeyboardInterrupt:
print c+"\n[*]"+g+" Scan is Aborted"+e
print c+"[*]"+g+" Stopping"+e
self.print_Log("\n[*]"+g+" Scan is Aborted")
time.sleep(1)
pass
except Exception ,ee:
self.print_Log("Exception in driver() "+str(ee))
#NmapScanObj=NmapScan()
#NmapScanObj.driver_main()
| [
"[email protected]"
] | ||
8f1ad2f8765959e86316c7bf0ab8d91cc8ace7c1 | 8fc2ab3d29a30e603e19b30bb9517928de529167 | /recursion_count7.py | da54e1e6515b053de42599e4a335a9a73680a757 | [] | no_license | rushilchugh/Practise | 35a9861bec6786580dc0a440eb25d78e43cb7bc9 | 98fd593b95dad641bef1d519c6c6ed1daaae630f | refs/heads/master | 2020-03-13T21:14:14.013604 | 2018-04-27T12:23:50 | 2018-04-27T12:23:50 | 131,291,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | __author__ = 'Rushil'
def count7(n):
if n == 0:
return 0
if n%10 == 7:
return 1 + count7(n//10)
else:
return count7(n//10)
print(count7(77177)) | [
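# Expected output: 4, since the digits of 77177 include four 7s.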
"[email protected]"
] | |
63c238fb79bd102fc4c18ace975c001acfa866d3 | cc1b87f9368e96e9b3ecfd5e0822d0037e60ac69 | /dashboard/dashboard/pinpoint/models/change/commit.py | aca059a9444f5d268ace099bd531f27d6f1d8525 | [
"BSD-3-Clause"
] | permissive | CTJyeh/catapult | bd710fb413b9058a7eae6073fe97a502546bbefe | c98b1ee7e410b2fb2f7dc9e2eb01804cf7c94fcb | refs/heads/master | 2020-08-19T21:57:40.981513 | 2019-10-17T09:51:09 | 2019-10-17T18:30:16 | 215,957,813 | 1 | 0 | BSD-3-Clause | 2019-10-18T06:41:19 | 2019-10-18T06:41:17 | null | UTF-8 | Python | false | false | 12,099 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import collections
import datetime
import re
from google.appengine.ext import deferred
from dashboard.pinpoint.models.change import commit_cache
from dashboard.pinpoint.models.change import repository as repository_module
from dashboard.services import gitiles_service
class NonLinearError(Exception):
"""Raised when trying to find the midpoint of Changes that are not linear."""
Dep = collections.namedtuple('Dep', ('repository_url', 'git_hash'))
def ParseDateWithUTCOffset(date_string):
# Parsing the utc offset within strptime isn't supported until python 3, so
# using workaround from https://stackoverflow.com/questions/26165659/
if '+' in date_string:
utc_sign = '+'
elif '-' in date_string:
utc_sign = '-'
else:
utc_sign = None
if utc_sign:
date_string, utc_offset = date_string.split(utc_sign)
date_string = date_string.strip()
dt = datetime.datetime.strptime(
date_string, '%a %b %d %H:%M:%S %Y')
if utc_sign and len(utc_offset) == 4:
if utc_sign == '+':
dt -= datetime.timedelta(
hours=int(utc_offset[0:2]), minutes=int(utc_offset[2:]))
elif utc_sign == '-':
dt += datetime.timedelta(
hours=int(utc_offset[0:2]), minutes=int(utc_offset[2:]))
return dt
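# Illustrative behaviour of the parser above (hypothetical input value):
#   ParseDateWithUTCOffset('Mon Apr 01 14:23:59 2019 +0200')
#   -> datetime.datetime(2019, 4, 1, 12, 23, 59)   # the +0200 offset is folded back into UTC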
class Commit(collections.namedtuple('Commit', ('repository', 'git_hash'))):
"""A git repository pinned to a particular commit."""
def __init__(self, *args, **kwargs):
super(Commit, self).__init__(*args, **kwargs)
self._repository_url = None
def __str__(self):
"""Returns an informal short string representation of this Commit."""
return self.repository + '@' + self.git_hash[:7]
@property
def id_string(self):
"""Returns a string that is unique to this repository and git hash."""
return self.repository + '@' + self.git_hash
@property
def repository_url(self):
"""The HTTPS URL of the repository as passed to `git clone`."""
cached_url = getattr(self, '_repository_url', None)
if not cached_url:
self._repository_url = repository_module.RepositoryUrl(self.repository)
return self._repository_url
def Deps(self):
"""Return the DEPS of this Commit.
Returns Dep namedtuples with repository URLs instead of Commit objects,
because Commit objects must have their repositories verified in the
datastore, and we'd like to do that more lazily.
Returns:
A frozenset of Dep (repository_url, git_hash) namedtuples.
"""
# Download and execute DEPS file.
try:
deps_file_contents = gitiles_service.FileContents(
self.repository_url, self.git_hash, 'DEPS')
except gitiles_service.NotFoundError:
return frozenset() # No DEPS file => no DEPS.
deps_data = {'Var': lambda variable: deps_data['vars'][variable]}
exec(deps_file_contents, deps_data) # pylint: disable=exec-used
# Pull out deps dict, including OS-specific deps.
deps_dict = deps_data.get('deps', {})
if not deps_dict:
return frozenset()
for deps_os in deps_data.get('deps_os', {}).values():
deps_dict.update(deps_os)
# Pull out vars dict to format brace variables.
vars_dict = deps_data.get('vars', {})
# Convert deps strings to repository and git hash.
commits = []
for dep_value in deps_dict.values():
if isinstance(dep_value, basestring):
dep_string = dep_value
else:
if 'url' not in dep_value:
# We don't support DEPS that are CIPD packages.
continue
dep_string = dep_value['url']
if 'revision' in dep_value:
dep_string += '@' + dep_value['revision']
dep_string_parts = dep_string.format(**vars_dict).split('@')
if len(dep_string_parts) < 2:
continue # Dep is not pinned to any particular revision.
if len(dep_string_parts) > 2:
raise NotImplementedError('Unknown DEP format: ' + dep_string)
repository_url, git_hash = dep_string_parts
if repository_url.endswith('.git'):
repository_url = repository_url[:-4]
commits.append(Dep(repository_url, git_hash))
return frozenset(commits)
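# The DEPS files parsed by Deps() above look roughly like the following
# (example repository URLs and revisions only, not real Chromium entries):
#   vars = {'foo_revision': 'abc123'}
#   deps = {
#       'src/foo': 'https://example.googlesource.com/foo.git@{foo_revision}',
#       'src/bar': {'url': 'https://example.googlesource.com/bar@def456'},
#   }
# Unpinned entries (no "@revision") and CIPD package dicts (no 'url') are skipped.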
def AsDict(self):
d = {
'repository': self.repository,
'git_hash': self.git_hash,
}
d.update(self.GetOrCacheCommitInfo())
d['created'] = d['created'].isoformat()
commit_position = _ParseCommitPosition(d['message'])
if commit_position:
d['commit_position'] = commit_position
review_url = _ParseCommitField('Reviewed-on: ', d['message'])
if review_url:
d['review_url'] = review_url
change_id = _ParseCommitField('Change-Id: ', d['message'])
if change_id:
d['change_id'] = change_id
return d
@classmethod
def FromDep(cls, dep):
"""Create a Commit from a Dep namedtuple as returned by Deps().
If the repository url is unknown, it will be added to the local datastore.
Arguments:
dep: A Dep namedtuple.
Returns:
A Commit.
"""
repository = repository_module.RepositoryName(
dep.repository_url, add_if_missing=True)
commit = cls(repository, dep.git_hash)
commit._repository_url = dep.repository_url
return commit
@classmethod
def FromData(cls, data):
"""Create a Commit from the given request data.
Raises:
KeyError: The repository name is not in the local datastore,
or the git hash is not valid.
ValueError: The URL has an unrecognized format.
"""
if isinstance(data, basestring):
return cls.FromUrl(data)
else:
return cls.FromDict(data)
@classmethod
def FromUrl(cls, url):
"""Create a Commit from a Gitiles URL.
Raises:
KeyError: The URL's repository or commit doesn't exist.
ValueError: The URL has an unrecognized format.
"""
url_parts = url.split('+')
if len(url_parts) != 2:
raise ValueError('Unknown commit URL format: ' + url)
repository, git_hash = url_parts
return cls.FromDict({
'repository': repository[:-1],
'git_hash': git_hash[1:],
})
@classmethod
def FromDict(cls, data):
"""Create a Commit from a dict.
If the repository is a repository URL, it will be translated to its short
form name.
Raises:
KeyError: The repository name is not in the local datastore,
or the git hash is not valid.
"""
repository = data['repository']
git_hash = data['git_hash']
# Translate repository if it's a URL.
if repository.startswith('https://'):
repository = repository_module.RepositoryName(repository)
try:
# If they send in something like HEAD, resolve to a hash.
repository_url = repository_module.RepositoryUrl(repository)
try:
# If it's already in the hash, then we've resolved this recently, and we
# don't go resolving the data from the gitiles service.
result = commit_cache.Get(git_hash)
except KeyError:
result = gitiles_service.CommitInfo(repository_url, git_hash)
git_hash = result['commit']
except gitiles_service.NotFoundError as e:
raise KeyError(str(e))
commit = cls(repository, git_hash)
commit._repository_url = repository_url
# IF this is something like HEAD, cache this for a short time so that we
# avoid hammering gitiles.
if not gitiles_service.IsHash(data['git_hash']):
commit.CacheCommitInfo(result, memcache_timeout=30*60)
return commit
@classmethod
def CommitRange(cls, commit_a, commit_b):
# We need to get the full list of commits in between two git hashes, and
# only look into the chain formed by following the first parents of each
# commit. This gives us a linear view of the log even in the presence of
# merge commits.
commits = []
# The commit_range by default is in reverse-chronological (latest commit
# first) order. This means we should keep following the first parent to get
# the linear history for a branch that we're exploring.
expected_parent = commit_b.git_hash
commit_range = gitiles_service.CommitRange(commit_a.repository_url,
commit_a.git_hash,
commit_b.git_hash)
for commit in commit_range:
# Skip commits until we find the parent we're looking for.
if commit['commit'] == expected_parent:
commits.append(commit)
if 'parents' in commit and len(commit['parents']):
expected_parent = commit['parents'][0]
return commits
def GetOrCacheCommitInfo(self):
try:
return commit_cache.Get(self.id_string)
except KeyError:
commit_info = gitiles_service.CommitInfo(
self.repository_url, self.git_hash)
return self.CacheCommitInfo(commit_info)
def CacheCommitInfo(self, commit_info, memcache_timeout=None):
url = self.repository_url + '/+/' + commit_info['commit']
author = commit_info['author']['email']
created = ParseDateWithUTCOffset(commit_info['committer']['time'])
subject = commit_info['message'].split('\n', 1)[0]
message = commit_info['message']
commit_cache.Put(
self.id_string, url, author, created, subject, message,
memcache_timeout=memcache_timeout)
return {
'url': url,
'author': author,
'created': created,
'subject': subject,
'message': message,
}
@classmethod
def Midpoint(cls, commit_a, commit_b):
"""Return a Commit halfway between the two given Commits.
Uses Gitiles to look up the commit range.
Args:
commit_a: The first Commit in the range.
commit_b: The last Commit in the range.
Returns:
A new Commit representing the midpoint.
The commit before the midpoint if the range has an even number of commits.
commit_a if the Commits are the same or adjacent.
Raises:
NonLinearError: The Commits are in different repositories or commit_a does
not come before commit_b.
"""
if commit_a == commit_b:
return commit_a
if commit_a.repository != commit_b.repository:
raise NonLinearError('Repositories differ between Commits: %s vs %s' %
(commit_a.repository, commit_b.repository))
commits = cls.CommitRange(commit_a, commit_b)
# We don't handle NotFoundErrors because we assume that all Commits either
# came from this method or were already validated elsewhere.
if len(commits) == 0:
raise NonLinearError('Commit "%s" does not come before commit "%s".' %
(commit_a, commit_b))
if len(commits) == 1:
return commit_a
commits.pop(0) # Remove commit_b from the range.
# Batch up the commits into chunks of 100 to avoid exceeding the size limit
# for the bound data in calls to `deferred.defer(...)`.
for offset in range(0, len(commits), 100):
deferred.defer(_CacheCommitDetails, commit_a.repository,
commits[offset:offset + 100])
return cls(commit_a.repository, commits[len(commits) // 2]['commit'])
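# Illustrative picks by Midpoint() (commit letters are hypothetical, oldest -> newest):
#   range A..E with interior commits B, C, D  -> returns C (true midpoint)
#   range A..D with interior commits B, C     -> returns B (the commit before the midpoint)
#   A..B adjacent, or A..A                    -> returns A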
def _CacheCommitDetails(repository, commits):
for cur in commits:
c = Commit(repository, cur['commit'])
c.CacheCommitInfo(cur)
def _ParseCommitPosition(commit_message):
"""Parses a commit message for the commit position.
Args:
commit_message: The commit message as a string.
Returns:
An int if there is a commit position, or None otherwise."""
match = re.search('^Cr-Commit-Position: [a-z/]+@{#([0-9]+)}$',
commit_message, re.MULTILINE)
if match:
return int(match.group(1))
return None
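# Example (illustrative message footer):
#   "Cr-Commit-Position: refs/heads/master@{#123456}"  ->  123456
# Messages without that footer return None.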
def _ParseCommitField(field, commit_message):
for l in commit_message.splitlines():
match = l.split(field)
if len(match) == 2:
return match[1]
return None
| [
"[email protected]"
] | |
472044e752d68a4fdb699ce05e0e059b80d169c0 | 7882ca50a06197dcbf0490c0318083e0a90f9f84 | /cython/wrap_arrays/test_cython_wrapper.py | 366b02fbedb75e936290d32cd6d39165d607e3ba | [
"MIT"
] | permissive | hannesk95/Python-Interface-Cpp | c8637a4c0c597b9b56facafeba7888ebc5ecb04a | 398eab0c6e7f5e0358edb6644c71b5fdc6b2606a | refs/heads/master | 2022-03-08T15:35:58.597336 | 2019-11-09T18:11:10 | 2019-11-09T18:11:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,106 | py | # coding=utf-8
import cyfastlz
def test_compress_and_decompress_roundtrip():
# Beginning of the United States Constitution
text ="""We the People of the United States, in Order to form a more perfect Union, establish Justice, insure domestic Tranquility, provide for the common defence, promote the general Welfare, and secure the Blessings of Liberty to ourselves and our Posterity, do ordain and establish this Constitution for the United States of America.
Article I (Article 1 - Legislative)
Section 1
All legislative Powers herein granted shall be vested in a Congress of the United States, which shall consist of a Senate and House of Representatives.
Section 2
1: The House of Representatives shall be composed of Members chosen every second Year by the People of the several States, and the Electors in each State shall have the Qualifications requisite for Electors of the most numerous Branch of the State Legislature.
2: No Person shall be a Representative who shall not have attained to the Age of twenty five Years, and been seven Years a Citizen of the United States, and who shall not, when elected, be an Inhabitant of that State in which he shall be chosen.
3: Representatives and direct Taxes shall be apportioned among the several States which may be included within this Union, according to their respective Numbers, which shall be determined by adding to the whole Number of free Persons, including those bound to Service for a Term of Years, and excluding Indians not taxed, three fifths of all other Persons.2 The actual Enumeration shall be made within three Years after the first Meeting of the Congress of the United States, and within every subsequent Term of ten Years, in such Manner as they shall by Law direct. The Number of Representatives shall not exceed one for every thirty Thousand, but each State shall have at Least one Representative; and until such enumeration shall be made, the State of New Hampshire shall be entitled to chuse three, Massachusetts eight, Rhode-Island and Providence Plantations one, Connecticut five, New-York six, New Jersey four, Pennsylvania eight, Delaware one, Maryland six, Virginia ten, North Carolina five, South Carolina five, and Georgia three.
4: When vacancies happen in the Representation from any State, the Executive Authority thereof shall issue Writs of Election to fill such Vacancies.
5: The House of Representatives shall chuse their Speaker and other Officers; and shall have the sole Power of Impeachment."""
# Convert the ntext to a bytes
text_bytes = text.encode()
# Compress a known text
compressed = cyfastlz.compress(text_bytes)
# Verify that the compressed text is actually smaller than the original
assert len(compressed) < len(text_bytes)
# Decompress the compressed text to reconstruct the original as bytes
reconstructed_bytes = cyfastlz.decompress(compressed)
# Convert back to a Python 3 unicode string
reconstructed = reconstructed_bytes.decode()
# Verify the reconstructed text is the same as the original
assert text == reconstructed
| [
"[email protected]"
] | |
2d90be2e1a6b6bc6c2fe03c9b529449c1127a540 | fd0604a74b72e273e194cae9145b4a299d2e1858 | /cogs/helpers/context.py | e5289d13bd928bfaecd26cf9b74ac09ac4cc12da | [] | no_license | DuckHunt-discord/DuckHunt-Community-Rewrite | 4610f0f5a503ae072b6092ddd031264842c6919c | c53ed8c994527052bcf588824ce6519280974056 | refs/heads/master | 2020-03-23T05:14:02.442187 | 2018-11-03T00:37:17 | 2018-11-03T00:37:17 | 141,131,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | import logging
from discord.ext import commands
class CustomContext(commands.Context):
def __init__(self, **attrs):
super().__init__(**attrs)
@property
def logger(self):
# Build a per-message logger that carries the channel and author as extra fields
if self.channel:
cname = self.channel.name
else:
cname = "PRIVATE_MESSAGE"
extra = {"channelname": f"#{cname}", "userid": f"{self.author.id}", "username": f"{self.author.name}#{self.author.discriminator}"}
logger = logging.LoggerAdapter(self.bot.base_logger, extra)
return logger
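# The adapter above only injects channel/user fields into every record; a log
# format such as the following (hypothetical, not defined in this bot) would
# surface them on the configured handlers:
#   '%(asctime)s %(channelname)s %(username)s (%(userid)s): %(message)s'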
async def send_to(self, message, user=None, **kwargs):
if user is None:
user = self.author
message = f"{user.mention} > {message}"
await self.send(message, **kwargs)
| [
"[email protected]"
] | |
670cf57ba7c63646b289b947011568ca7a22145d | 9df2b97e7010995945565dc29c41a3fde41720d3 | /plugin.video.salts/default.py | f0c927345ce1bb40fd038689504f5aa516d904b9 | [] | no_license | Kodi-4-Beginners/tknorris-beta-repo | 8c00f5e1e80c09279c4f91794b9041a27d1815a0 | 9e76f396fb0832c2ef4f204fc95b63af6763f405 | refs/heads/master | 2021-01-17T07:55:26.934163 | 2015-02-04T02:49:58 | 2015-02-04T02:49:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94,741 | py | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
import re
import datetime
import time
import xbmcplugin
import xbmcgui
import xbmc
import xbmcvfs
import urllib2
from addon.common.addon import Addon
from salts_lib.db_utils import DB_Connection
from salts_lib.url_dispatcher import URL_Dispatcher
from salts_lib.srt_scraper import SRT_Scraper
from salts_lib.trakt_api import Trakt_API, TransientTraktError
from salts_lib import utils
from salts_lib import log_utils
from salts_lib.constants import *
from scrapers import * # import all scrapers into this namespace
from scrapers import ScraperVideo
_SALTS = Addon('plugin.video.salts', sys.argv)
ICON_PATH = os.path.join(_SALTS.get_path(), 'icon.png')
username = _SALTS.get_setting('username')
password = _SALTS.get_setting('password')
TOKEN = utils.get_trakt_token()
use_https = _SALTS.get_setting('use_https') == 'true'
trakt_timeout = int(_SALTS.get_setting('trakt_timeout'))
list_size = int(_SALTS.get_setting('list_size'))
trakt_api = Trakt_API(username, password, TOKEN, use_https, list_size, trakt_timeout)
url_dispatcher = URL_Dispatcher()
db_connection = DB_Connection()
global urlresolver
@url_dispatcher.register(MODES.MAIN)
def main_menu():
db_connection.init_database()
if not TOKEN:
remind_count = int(_SALTS.get_setting('remind_count'))
remind_max = 5
if remind_count < remind_max:
remind_count += 1
log_utils.log('Showing Config reminder')
builtin = 'XBMC.Notification(%s,(%s/%s) Configure Trakt Account for more options, 7500, %s)'
xbmc.executebuiltin(builtin % (_SALTS.get_name(), remind_count, remind_max, ICON_PATH))
_SALTS.set_setting('remind_count', str(remind_count))
else:
_SALTS.set_setting('remind_count', '0')
if _SALTS.get_setting('auto-disable') != DISABLE_SETTINGS.OFF:
utils.do_disable_check()
_SALTS.add_directory({'mode': MODES.BROWSE, 'section': SECTIONS.MOVIES}, {'title': 'Movies'}, img=utils.art('movies.png'), fanart=utils.art('fanart.jpg'))
_SALTS.add_directory({'mode': MODES.BROWSE, 'section': SECTIONS.TV}, {'title': 'TV Shows'}, img=utils.art('television.png'), fanart=utils.art('fanart.jpg'))
_SALTS.add_directory({'mode': MODES.SETTINGS}, {'title': 'Settings'}, img=utils.art('settings.png'), fanart=utils.art('fanart.jpg'))
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=False)
@url_dispatcher.register(MODES.SETTINGS)
def settings_menu():
_SALTS.add_directory({'mode': MODES.SCRAPERS}, {'title': 'Scraper Sort Order'}, img=utils.art('settings.png'), fanart=utils.art('fanart.jpg'))
_SALTS.add_directory({'mode': MODES.RES_SETTINGS}, {'title': 'Url Resolver Settings'}, img=utils.art('settings.png'), fanart=utils.art('fanart.jpg'))
_SALTS.add_directory({'mode': MODES.ADDON_SETTINGS}, {'title': 'Add-on Settings'}, img=utils.art('settings.png'), fanart=utils.art('fanart.jpg'))
_SALTS.add_directory({'mode': MODES.SHOW_VIEWS}, {'title': 'Set Default Views'}, img=utils.art('settings.png'), fanart=utils.art('fanart.jpg'))
_SALTS.add_directory({'mode': MODES.BROWSE_URLS}, {'title': 'Remove Cached Url(s)'}, img=utils.art('settings.png'), fanart=utils.art('fanart.jpg'))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@url_dispatcher.register(MODES.SHOW_VIEWS)
def show_views():
for content_type in ['movies', 'tvshows', 'seasons', 'episodes']:
_SALTS.add_directory({'mode': MODES.BROWSE_VIEW, 'content_type': content_type}, {'title': 'Set Default %s View' % (content_type.capitalize())}, img=utils.art('settings.png'), fanart=utils.art('fanart.jpg'))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@url_dispatcher.register(MODES.BROWSE_VIEW, ['content_type'])
def browse_view(content_type):
_SALTS.add_directory({'mode': MODES.SET_VIEW, 'content_type': content_type}, {'title': 'Set a view then select this item to set the default %s view' % (content_type.capitalize())}, img=utils.art('settings.png'), fanart=utils.art('fanart.jpg'))
utils.set_view(content_type, False)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@url_dispatcher.register(MODES.SET_VIEW, ['content_type'])
def set_default_view(content_type):
current_view = utils.get_current_view()
if current_view:
_SALTS.set_setting('%s_view' % (content_type), current_view)
view_name = xbmc.getInfoLabel('Container.Viewmode')
builtin = "XBMC.Notification(Import,%s View Set to: %s,2000, %s)" % (content_type.capitalize(), view_name, ICON_PATH)
xbmc.executebuiltin(builtin)
@url_dispatcher.register(MODES.BROWSE_URLS)
def browse_urls():
urls = db_connection.get_all_urls(order_matters=True)
_SALTS.add_directory({'mode': MODES.FLUSH_CACHE}, {'title': '***Delete [B][COLOR red]ENTIRE[/COLOR][/B] Url Cache***'}, img=utils.art('settings.png'), fanart=utils.art('fanart.jpg'))
for url in urls:
_SALTS.add_directory({'mode': MODES.DELETE_URL, 'url': url[0]}, {'title': url[0]}, img=utils.art('settings.png'), fanart=utils.art('fanart.jpg'))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@url_dispatcher.register(MODES.DELETE_URL, ['url'])
def delete_url(url):
db_connection.delete_cached_url(url)
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.RES_SETTINGS)
def resolver_settings():
global urlresolver
import urlresolver
urlresolver.display_settings()
@url_dispatcher.register(MODES.ADDON_SETTINGS)
def addon_settings():
_SALTS.show_settings()
@url_dispatcher.register(MODES.BROWSE, ['section'])
def browse_menu(section):
if section == SECTIONS.TV:
section_label = 'TV Shows'
search_img = 'television_search.png'
else:
section_label = 'Movies'
search_img = 'movies_search.png'
if utils.menu_on('trending'): _SALTS.add_directory({'mode': MODES.TRENDING, 'section': section}, {'title': 'Trending %s' % (section_label)}, img=utils.art('trending.png'), fanart=utils.art('fanart.jpg'))
if utils.menu_on('popular'): _SALTS.add_directory({'mode': MODES.POPULAR, 'section': section}, {'title': 'Popular %s' % (section_label)}, img=utils.art('popular.png'), fanart=utils.art('fanart.jpg'))
if utils.menu_on('recent'): _SALTS.add_directory({'mode': MODES.RECENT, 'section': section}, {'title': 'Recently Updated %s' % (section_label)}, img=utils.art('recent.png'), fanart=utils.art('fanart.jpg'))
if TOKEN:
if utils.menu_on('recommended'): _SALTS.add_directory({'mode': MODES.RECOMMEND, 'section': section}, {'title': 'Recommended %s' % (section_label)}, img=utils.art('recommended.png'), fanart=utils.art('fanart.jpg'))
if utils.menu_on('collection'): add_refresh_item({'mode': MODES.SHOW_COLLECTION, 'section': section}, 'My %s Collection' % (section_label[:-1]), utils.art('collection.png'), utils.art('fanart.jpg'))
if utils.menu_on('favorites'): _SALTS.add_directory({'mode': MODES.SHOW_FAVORITES, 'section': section}, {'title': 'My Favorites'}, img=utils.art('my_favorites.png'), fanart=utils.art('fanart.jpg'))
if utils.menu_on('subscriptions'): _SALTS.add_directory({'mode': MODES.MANAGE_SUBS, 'section': section}, {'title': 'My Subscriptions'}, img=utils.art('my_subscriptions.png'), fanart=utils.art('fanart.jpg'))
if utils.menu_on('watchlist'): _SALTS.add_directory({'mode': MODES.SHOW_WATCHLIST, 'section': section}, {'title': 'My Watchlist'}, img=utils.art('my_watchlist.png'), fanart=utils.art('fanart.jpg'))
if utils.menu_on('my_lists'): _SALTS.add_directory({'mode': MODES.MY_LISTS, 'section': section}, {'title': 'My Lists'}, img=utils.art('my_lists.png'), fanart=utils.art('fanart.jpg'))
# if utils.menu_on('other_lists'): _SALTS.add_directory({'mode': MODES.OTHER_LISTS, 'section': section}, {'title': 'Other Lists'}, img=utils.art('other_lists.png'), fanart=utils.art('fanart.jpg'))
if section == SECTIONS.TV:
if TOKEN:
if utils.menu_on('progress'): add_refresh_item({'mode': MODES.SHOW_PROGRESS}, 'My Next Episodes', utils.art('my_progress.png'), utils.art('fanart.jpg'))
if utils.menu_on('my_cal'): add_refresh_item({'mode': MODES.MY_CAL}, 'My Calendar', utils.art('my_calendar.png'), utils.art('fanart.jpg'))
if utils.menu_on('general_cal'): add_refresh_item({'mode': MODES.CAL}, 'General Calendar', utils.art('calendar.png'), utils.art('fanart.jpg'))
if utils.menu_on('premiere_cal'): add_refresh_item({'mode': MODES.PREMIERES}, 'Premiere Calendar', utils.art('premiere_calendar.png'), utils.art('fanart.jpg'))
# if TOKEN:
# if utils.menu_on('friends'): add_refresh_item({'mode': MODES.FRIENDS_EPISODE, 'section': section}, 'Friends Episode Activity [COLOR red][I](Temporarily Broken)[/I][/COLOR]', utils.art('friends_episode.png'), utils.art('fanart.jpg'))
# if TOKEN:
# if utils.menu_on('friends'): add_refresh_item({'mode': MODES.FRIENDS, 'section': section}, 'Friends Activity [COLOR red][I](Temporarily Broken)[/I][/COLOR]', utils.art('friends.png'), utils.art('fanart.jpg'))
if utils.menu_on('search'): _SALTS.add_directory({'mode': MODES.SEARCH, 'section': section}, {'title': 'Search'}, img=utils.art(search_img), fanart=utils.art('fanart.jpg'))
if utils.menu_on('search'): _SALTS.add_directory({'mode': MODES.RECENT_SEARCH, 'section': section}, {'title': 'Recent Searches'}, img=utils.art(search_img), fanart=utils.art('fanart.jpg'))
if utils.menu_on('search'): _SALTS.add_directory({'mode': MODES.SAVED_SEARCHES, 'section': section}, {'title': 'Saved Searches'}, img=utils.art(search_img), fanart=utils.art('fanart.jpg'))
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def add_refresh_item(queries, label, thumb, fanart):
liz = xbmcgui.ListItem(label, iconImage=thumb, thumbnailImage=thumb)
liz.setProperty('fanart_image', fanart)
menu_items = []
refresh_queries = {'mode': MODES.FORCE_REFRESH, 'refresh_mode': queries['mode']}
if 'section' in queries: refresh_queries.update({'section': queries['section']})
menu_items.append(('Force Refresh', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(refresh_queries))),)
liz.addContextMenuItems(menu_items)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), _SALTS.build_plugin_url(queries), liz, isFolder=True)
@url_dispatcher.register(MODES.FORCE_REFRESH, ['refresh_mode'], ['section', 'slug', 'username'])
def force_refresh(refresh_mode, section=None, slug=None, username=None):
builtin = "XBMC.Notification(%s,Forcing Refresh, 2000, %s)" % (_SALTS.get_name(), ICON_PATH)
xbmc.executebuiltin(builtin)
log_utils.log('Forcing refresh for mode: |%s|%s|%s|%s|' % (refresh_mode, section, slug, username))
now = datetime.datetime.now()
offset = int(_SALTS.get_setting('calendar-day'))
start_date = now + datetime.timedelta(days=offset)
start_date = datetime.datetime.strftime(start_date, '%Y-%m-%d')
if refresh_mode == MODES.SHOW_COLLECTION:
trakt_api.get_collection(section, cached=False)
elif refresh_mode == MODES.SHOW_PROGRESS:
get_progress(cache_override=True)
elif refresh_mode == MODES.MY_CAL:
trakt_api.get_my_calendar(start_date, cached=False)
elif refresh_mode == MODES.CAL:
trakt_api.get_calendar(start_date, cached=False)
elif refresh_mode == MODES.PREMIERES:
trakt_api.get_premieres(start_date, cached=False)
elif refresh_mode == MODES.FRIENDS_EPISODE:
trakt_api.get_friends_activity(section, True)
elif refresh_mode == MODES.FRIENDS:
trakt_api.get_friends_activity(section)
elif refresh_mode == MODES.SHOW_LIST:
trakt_api.show_list(slug, section, username, cached=False)
else:
log_utils.log('Force refresh on unsupported mode: |%s|' % (refresh_mode))
return
log_utils.log('Force refresh complete: |%s|%s|%s|%s|' % (refresh_mode, section, slug, username))
builtin = "XBMC.Notification(%s,Force Refresh Complete, 2000, %s)" % (_SALTS.get_name(), ICON_PATH)
xbmc.executebuiltin(builtin)
@url_dispatcher.register(MODES.SCRAPERS)
def scraper_settings():
scrapers = utils.relevant_scrapers(None, True, True)
if _SALTS.get_setting('toggle_enable') == 'true':
label = '**Enable All Scrapers**'
else:
label = '**Disable All Scrapers**'
_SALTS.add_directory({'mode': MODES.TOGGLE_ALL}, {'title': label}, img=utils.art('scraper.png'), fanart=utils.art('fanart.jpg'))
for i, cls in enumerate(scrapers):
label = '%s (Provides: %s)' % (cls.get_name(), str(list(cls.provides())).replace("'", ""))
label = '%s (Success: %s%%)' % (label, utils.calculate_success(cls.get_name()))
if not utils.scraper_enabled(cls.get_name()):
label = '[COLOR darkred]%s[/COLOR]' % (label)
toggle_label = 'Enable Scraper'
else:
toggle_label = 'Disable Scraper'
liz = xbmcgui.ListItem(label=label, iconImage=utils.art('scraper.png'), thumbnailImage=utils.art('scraper.png'))
liz.setProperty('fanart_image', utils.art('fanart.jpg'))
liz.setProperty('IsPlayable', 'false')
liz.setInfo('video', {'title': label})
liz_url = _SALTS.build_plugin_url({'mode': MODES.TOGGLE_SCRAPER, 'name': cls.get_name()})
menu_items = []
if i > 0:
queries = {'mode': MODES.MOVE_SCRAPER, 'name': cls.get_name(), 'direction': DIRS.UP, 'other': scrapers[i - 1].get_name()}
menu_items.append(('Move Up', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
if i < len(scrapers) - 1:
queries = {'mode': MODES.MOVE_SCRAPER, 'name': cls.get_name(), 'direction': DIRS.DOWN, 'other': scrapers[i + 1].get_name()}
menu_items.append(('Move Down', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.MOVE_TO, 'name': cls.get_name()}
menu_items.append(('Move To...', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.TOGGLE_SCRAPER, 'name': cls.get_name()}
menu_items.append((toggle_label, 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
liz.addContextMenuItems(menu_items, replaceItems=True)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@url_dispatcher.register(MODES.MOVE_TO, ['name'])
def move_to(name):
dialog = xbmcgui.Dialog()
sort_key = utils.make_source_sort_key()
new_pos = dialog.numeric(0, 'Enter New Position (1 - %s)' % (len(sort_key)))
if new_pos:
new_pos = int(new_pos)
old_key = sort_key[name]
new_key = -new_pos + 1
if (new_pos <= 0 or new_pos > len(sort_key)) or old_key == new_key:
return
for key in sort_key:
this_key = sort_key[key]
# moving scraper up
if new_key > old_key:
# move everything between the old and new down
if this_key > old_key and this_key <= new_key:
sort_key[key] -= 1
# moving scraper down
else:
# move everything between the old and new up
if this_key >= new_key and this_key < old_key:
sort_key[key] += 1
sort_key[name] = new_key
_SALTS.set_setting('source_sort_order', utils.make_source_sort_string(sort_key))
xbmc.executebuiltin("XBMC.Container.Refresh")
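# Note on the sort-key arithmetic in move_to(): the key convention used here is
# key = 1 - position (position 1 -> 0, position 2 -> -1, ...), so a larger key sorts
# earlier. Illustrative move of the scraper at position 1 to position 3: the two
# scrapers below it each get += 1 and the moved scraper takes key -2 (position 3).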
@url_dispatcher.register(MODES.MOVE_SCRAPER, ['name', 'direction', 'other'])
def move_scraper(name, direction, other):
sort_key = utils.make_source_sort_key()
if direction == DIRS.UP:
sort_key[name] += 1
sort_key[other] -= 1
elif direction == DIRS.DOWN:
sort_key[name] -= 1
sort_key[other] += 1
_SALTS.set_setting('source_sort_order', utils.make_source_sort_string(sort_key))
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.TOGGLE_ALL)
def toggle_scrapers():
cur_toggle = _SALTS.get_setting('toggle_enable')
scrapers = utils.relevant_scrapers(None, True, True)
for scraper in scrapers:
_SALTS.set_setting('%s-enable' % (scraper.get_name()), cur_toggle)
new_toggle = 'false' if cur_toggle == 'true' else 'true'
_SALTS.set_setting('toggle_enable', new_toggle)
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.TOGGLE_SCRAPER, ['name'])
def toggle_scraper(name):
if utils.scraper_enabled(name):
setting = 'false'
else:
setting = 'true'
_SALTS.set_setting('%s-enable' % (name), setting)
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.TRENDING, ['section'], ['page'])
def browse_trending(section, page=1):
list_data = trakt_api.get_trending(section, page)
make_dir_from_list(section, list_data, query={'mode': MODES.TRENDING, 'section': section}, page=page)
@url_dispatcher.register(MODES.POPULAR, ['section'], ['page'])
def browse_popular(section, page=1):
list_data = trakt_api.get_popular(section, page)
make_dir_from_list(section, list_data, query={'mode': MODES.POPULAR, 'section': section}, page=page)
@url_dispatcher.register(MODES.RECENT, ['section'], ['page'])
def browse_recent(section, page=1):
now = datetime.datetime.now()
start_date = now - datetime.timedelta(days=7)
start_date = datetime.datetime.strftime(start_date, '%Y-%m-%d')
list_data = trakt_api.get_recent(section, start_date, page)
make_dir_from_list(section, list_data, query={'mode': MODES.RECENT, 'section': section}, page=page)
@url_dispatcher.register(MODES.RECOMMEND, ['section'])
def browse_recommendations(section):
list_data = trakt_api.get_recommendations(section)
make_dir_from_list(section, list_data)
@url_dispatcher.register(MODES.FRIENDS, ['mode', 'section'])
@url_dispatcher.register(MODES.FRIENDS_EPISODE, ['mode', 'section'])
def browse_friends(mode, section):
section_params = utils.get_section_params(section)
activities = trakt_api.get_friends_activity(section, mode == MODES.FRIENDS_EPISODE)
totalItems = len(activities)
for activity in activities['activity']:
if 'episode' in activity:
show = activity['show']
liz, liz_url = make_episode_item(show, activity['episode'], show_subs=False)
folder = (liz.getProperty('isPlayable') != 'true')
label = liz.getLabel()
label = '%s (%s) - %s' % (show['title'], show['year'], label.decode('utf-8', 'replace'))
liz.setLabel(label)
else:
liz, liz_url = make_item(section_params, activity[TRAKT_SECTIONS[section][:-1]])
folder = section_params['folder']
label = liz.getLabel()
action = ' [[COLOR blue]%s[/COLOR] [COLOR green]%s' % (activity['user']['username'], activity['action'])
if activity['action'] == 'rating': action += ' - %s' % (activity['rating'])
action += '[/COLOR]]'
label = '%s %s' % (action, label.decode('utf-8', 'replace'))
liz.setLabel(label)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=folder, totalItems=totalItems)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@url_dispatcher.register(MODES.MY_CAL, ['mode'], ['start_date'])
@url_dispatcher.register(MODES.CAL, ['mode'], ['start_date'])
@url_dispatcher.register(MODES.PREMIERES, ['mode'], ['start_date'])
def browse_calendar(mode, start_date=None):
if start_date is None:
now = datetime.datetime.now()
offset = int(_SALTS.get_setting('calendar-day'))
start_date = now + datetime.timedelta(days=offset)
start_date = datetime.datetime.strftime(start_date, '%Y-%m-%d')
if mode == MODES.MY_CAL:
days = trakt_api.get_my_calendar(start_date)
elif mode == MODES.CAL:
days = trakt_api.get_calendar(start_date)
elif mode == MODES.PREMIERES:
days = trakt_api.get_premieres(start_date)
make_dir_from_cal(mode, start_date, days)
@url_dispatcher.register(MODES.MY_LISTS, ['section'])
def browse_lists(section):
lists = trakt_api.get_lists()
lists.insert(0, {'name': 'watchlist', 'ids': {'slug': utils.WATCHLIST_SLUG}})
totalItems = len(lists)
for user_list in lists:
ids = user_list['ids']
liz = xbmcgui.ListItem(label=user_list['name'], iconImage=utils.art('list.png'), thumbnailImage=utils.art('list.png'))
liz.setProperty('fanart_image', utils.art('fanart.jpg'))
queries = {'mode': MODES.SHOW_LIST, 'section': section, 'slug': ids['slug']}
liz_url = _SALTS.build_plugin_url(queries)
menu_items = []
queries = {'mode': MODES.SET_FAV_LIST, 'slug': ids['slug'], 'section': section}
menu_items.append(('Set as Favorites List', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.SET_SUB_LIST, 'slug': ids['slug'], 'section': section}
menu_items.append(('Set as Subscription List', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.COPY_LIST, 'slug': COLLECTION_SLUG, 'section': section, 'target_slug': ids['slug']}
menu_items.append(('Import from Collection', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
liz.addContextMenuItems(menu_items, replaceItems=True)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=True, totalItems=totalItems)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@url_dispatcher.register(MODES.OTHER_LISTS, ['section'])
def browse_other_lists(section):
liz = xbmcgui.ListItem(label='Add another user\'s list', iconImage=utils.art('add_other.png'), thumbnailImage=utils.art('add_other.png'))
liz.setProperty('fanart_image', utils.art('fanart.jpg'))
liz_url = _SALTS.build_plugin_url({'mode': MODES.ADD_OTHER_LIST, 'section': section})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
lists = db_connection.get_other_lists(section)
totalItems = len(lists)
for other_list in lists:
try:
header = trakt_api.get_list_header(other_list[1], other_list[0])
except urllib2.HTTPError as e:
if e.code == 404:
header = None
else:
raise
if header:
found = True
if other_list[2]:
name = other_list[2]
else:
name = header['name']
else:
name = other_list[1]
found = False
label = '[[COLOR blue]%s[/COLOR]] %s' % (other_list[0], name)
liz = xbmcgui.ListItem(label=label, iconImage=utils.art('list.png'), thumbnailImage=utils.art('list.png'))
liz.setProperty('fanart_image', utils.art('fanart.jpg'))
if found:
queries = {'mode': MODES.SHOW_LIST, 'section': section, 'slug': other_list[1], 'username': other_list[0]}
else:
queries = {'mode': MODES.OTHER_LISTS, 'section': section}
liz_url = _SALTS.build_plugin_url(queries)
menu_items = []
if found:
queries = {'mode': MODES.FORCE_REFRESH, 'refresh_mode': MODES.SHOW_LIST, 'section': section, 'slug': other_list[1], 'username': other_list[0]}
menu_items.append(('Force Refresh', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.COPY_LIST, 'section': section, 'slug': other_list[1], 'username': other_list[0]}
menu_items.append(('Copy to My List', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.ADD_OTHER_LIST, 'section': section, 'username': other_list[0]}
menu_items.append(('Add more from %s' % (other_list[0]), 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.REMOVE_LIST, 'section': section, 'slug': other_list[1], 'username': other_list[0]}
menu_items.append(('Remove List', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.RENAME_LIST, 'section': section, 'slug': other_list[1], 'username': other_list[0], 'name': name}
menu_items.append(('Rename List', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
liz.addContextMenuItems(menu_items, replaceItems=True)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=True, totalItems=totalItems)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@url_dispatcher.register(MODES.REMOVE_LIST, ['section', 'username', 'slug'])
def remove_list(section, username, slug):
db_connection.delete_other_list(section, username, slug)
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.RENAME_LIST, ['section', 'slug', 'username', 'name'])
def rename_list(section, slug, username, name):
keyboard = xbmc.Keyboard()
keyboard.setHeading('Enter the new name (blank to reset)')
keyboard.setDefault(name)
keyboard.doModal()
if keyboard.isConfirmed():
new_name = keyboard.getText()
db_connection.rename_other_list(section, username, slug, new_name)
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.ADD_OTHER_LIST, ['section'], ['username'])
def add_other_list(section, username=None):
if username is None:
keyboard = xbmc.Keyboard()
keyboard.setHeading('Enter username of list owner')
keyboard.doModal()
if keyboard.isConfirmed():
username = keyboard.getText()
slug = pick_list(None, section, username)
if slug:
db_connection.add_other_list(section, username, slug)
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.SHOW_LIST, ['section', 'slug'], ['username'])
def show_list(section, slug, username=None):
if slug == utils.WATCHLIST_SLUG:
items = trakt_api.show_watchlist(section)
else:
items = trakt_api.show_list(slug, section, username)
make_dir_from_list(section, items, slug)
@url_dispatcher.register(MODES.SHOW_WATCHLIST, ['section'])
def show_watchlist(section):
show_list(section, utils.WATCHLIST_SLUG)
@url_dispatcher.register(MODES.SHOW_COLLECTION, ['section'])
def show_collection(section):
items = trakt_api.get_collection(section, cached=_SALTS.get_setting('cache_collection') == 'true')
sort_key = int(_SALTS.get_setting('sort_collection'))
if sort_key == 1:
items.reverse()
elif sort_key > 0:
items.sort(key=lambda x: x[['title', 'year'][sort_key - 2]])
make_dir_from_list(section, items, COLLECTION_SLUG)
def get_progress(cache_override=False):
cached = _SALTS.get_setting('cache_watched') == 'true' and not cache_override
max_progress = int(_SALTS.get_setting('progress_size'))
watched_list = trakt_api.get_watched(SECTIONS.TV, full=True, cached=cached)
episodes = []
for i, watched in enumerate(watched_list):
if i != 0 and i >= max_progress:
break
progress = trakt_api.get_show_progress(watched['show']['ids']['slug'], full=True, cached=cached)
if 'next_episode' in progress and progress['next_episode']:
episode = {'show': watched['show'], 'episode': progress['next_episode']}
episode['last_watched_at'] = watched['last_watched_at']
episode['percent_completed'] = (progress['completed'] * 100) / progress['aired'] if progress['aired'] > 0 else 0
episode['completed'] = progress['completed']
episodes.append(episode)
return utils.sort_progress(episodes, sort_order=SORT_MAP[int(_SALTS.get_setting('sort_progress'))])
@url_dispatcher.register(MODES.SHOW_PROGRESS)
def show_progress():
for episode in get_progress():
        log_utils.log('Episode: Sort Keys: Title: |%s| Last Watched: |%s| Percent: |%s%%| Completed: |%s|' % (episode['show']['title'], episode['last_watched_at'], episode['percent_completed'], episode['completed']), xbmc.LOGDEBUG)
first_aired_utc = utils.iso_2_utc(episode['episode']['first_aired'])
if _SALTS.get_setting('show_unaired_next') == 'true' or first_aired_utc <= time.time():
show = episode['show']
fanart = show['images']['fanart']['full']
date = utils.make_day(time.strftime('%Y-%m-%d', time.localtime(first_aired_utc)))
menu_items = []
queries = {'mode': MODES.SEASONS, 'slug': show['ids']['slug'], 'fanart': fanart}
menu_items.append(('Browse Seasons', 'Container.Update(%s)' % (_SALTS.build_plugin_url(queries))),)
liz, liz_url = make_episode_item(show, episode['episode'], menu_items=menu_items)
label = liz.getLabel()
label = '[[COLOR deeppink]%s[/COLOR]] %s - %s' % (date, show['title'], label.decode('utf-8', 'replace'))
liz.setLabel(label)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=(liz.getProperty('isPlayable') != 'true'))
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=False)
@url_dispatcher.register(MODES.MANAGE_SUBS, ['section'])
def manage_subscriptions(section):
slug = _SALTS.get_setting('%s_sub_slug' % (section))
if slug:
next_run = utils.get_next_run(MODES.UPDATE_SUBS)
label = 'Update Subscriptions: (Next Run: [COLOR %s]%s[/COLOR])'
if _SALTS.get_setting('auto-' + MODES.UPDATE_SUBS) == 'true':
color = 'green'
run_str = next_run.strftime("%Y-%m-%d %I:%M:%S %p")
else:
color = 'red'
run_str = 'DISABLED'
label = label % (color, run_str)
liz = xbmcgui.ListItem(label=label, iconImage=utils.art('update_subscriptions.png'), thumbnailImage=utils.art('update_subscriptions.png'))
liz.setProperty('fanart_image', utils.art('fanart.jpg'))
liz_url = _SALTS.build_plugin_url({'mode': MODES.UPDATE_SUBS, 'section': section})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
if section == SECTIONS.TV:
liz = xbmcgui.ListItem(label='Clean-Up Subscriptions', iconImage=utils.art('clean_up.png'), thumbnailImage=utils.art('clean_up.png'))
liz.setProperty('fanart_image', utils.art('fanart.jpg'))
liz_url = _SALTS.build_plugin_url({'mode': MODES.CLEAN_SUBS})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
show_pickable_list(slug, 'Pick a list to use for Subscriptions', MODES.PICK_SUB_LIST, section)
@url_dispatcher.register(MODES.SHOW_FAVORITES, ['section'])
def show_favorites(section):
slug = _SALTS.get_setting('%s_fav_slug' % (section))
show_pickable_list(slug, 'Pick a list to use for Favorites', MODES.PICK_FAV_LIST, section)
@url_dispatcher.register(MODES.PICK_SUB_LIST, ['mode', 'section'])
@url_dispatcher.register(MODES.PICK_FAV_LIST, ['mode', 'section'])
def pick_list(mode, section, username=None):
slug = utils.choose_list(username)
if slug:
if mode == MODES.PICK_FAV_LIST:
set_list(MODES.SET_FAV_LIST, slug, section)
elif mode == MODES.PICK_SUB_LIST:
set_list(MODES.SET_SUB_LIST, slug, section)
else:
return slug
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.SET_SUB_LIST, ['mode', 'slug', 'section'])
@url_dispatcher.register(MODES.SET_FAV_LIST, ['mode', 'slug', 'section'])
def set_list(mode, slug, section):
if mode == MODES.SET_FAV_LIST:
setting = '%s_fav_slug' % (section)
elif mode == MODES.SET_SUB_LIST:
setting = '%s_sub_slug' % (section)
_SALTS.set_setting(setting, slug)
@url_dispatcher.register(MODES.SEARCH, ['section'])
def search(section, search_text=None):
keyboard = xbmc.Keyboard()
keyboard.setHeading('Search %s' % (section))
while True:
keyboard.doModal()
if keyboard.isConfirmed():
search_text = keyboard.getText()
if not search_text:
msg = 'Blank Searches are not allowed'
builtin = 'XBMC.Notification(%s,%s, 5000, %s)'
xbmc.executebuiltin(builtin % (_SALTS.get_name(), msg, ICON_PATH))
return
else:
break
else:
break
if keyboard.isConfirmed():
search_text = keyboard.getText()
utils.keep_search(section, search_text)
queries = {'mode': MODES.SEARCH_RESULTS, 'section': section, 'query': search_text}
pluginurl = _SALTS.build_plugin_url(queries)
builtin = 'Container.Update(%s)' % (pluginurl)
xbmc.executebuiltin(builtin)
@url_dispatcher.register(MODES.RECENT_SEARCH, ['section'])
def recent_searches(section):
if section == SECTIONS.TV:
search_img = 'television_search.png'
else:
search_img = 'movies_search.png'
head = int(_SALTS.get_setting('%s_search_head' % (section)))
for i in reversed(range(0, SEARCH_HISTORY)):
index = (i + head + 1) % SEARCH_HISTORY
search_text = db_connection.get_setting('%s_search_%s' % (section, index))
if not search_text:
break
label = '[%s Search] %s' % (section, search_text)
liz = xbmcgui.ListItem(label=label, iconImage=utils.art(search_img), thumbnailImage=utils.art(search_img))
liz.setProperty('fanart_image', utils.art('fanart.png'))
menu_items = []
refresh_queries = {'mode': MODES.SAVE_SEARCH, 'section': section, 'query': search_text}
menu_items.append(('Save Search', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(refresh_queries))),)
liz.addContextMenuItems(menu_items)
queries = {'mode': MODES.SEARCH_RESULTS, 'section': section, 'query': search_text}
xbmcplugin.addDirectoryItem(int(sys.argv[1]), _SALTS.build_plugin_url(queries), liz, isFolder=True)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@url_dispatcher.register(MODES.SAVED_SEARCHES, ['section'])
def saved_searches(section):
if section == SECTIONS.TV:
search_img = 'television_search.png'
else:
search_img = 'movies_search.png'
for search in db_connection.get_searches(section, order_matters=True):
label = '[%s Search] %s' % (section, search[1])
liz = xbmcgui.ListItem(label=label, iconImage=utils.art(search_img), thumbnailImage=utils.art(search_img))
liz.setProperty('fanart_image', utils.art('fanart.png'))
menu_items = []
refresh_queries = {'mode': MODES.DELETE_SEARCH, 'search_id': search[0]}
menu_items.append(('Delete Search', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(refresh_queries))),)
liz.addContextMenuItems(menu_items)
queries = {'mode': MODES.SEARCH_RESULTS, 'section': section, 'query': search[1]}
xbmcplugin.addDirectoryItem(int(sys.argv[1]), _SALTS.build_plugin_url(queries), liz, isFolder=True)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@url_dispatcher.register(MODES.SAVE_SEARCH, ['section', 'query'])
def save_search(section, query):
db_connection.save_search(section, query)
@url_dispatcher.register(MODES.DELETE_SEARCH, ['search_id'])
def delete_search(search_id):
db_connection.delete_search(search_id)
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.SEARCH_RESULTS, ['section', 'query'], ['page'])
def search_results(section, query, page=1):
results = trakt_api.search(section, query, page)
make_dir_from_list(section, results, query={'mode': MODES.SEARCH_RESULTS, 'section': section, 'query': query}, page=page)
@url_dispatcher.register(MODES.SEASONS, ['slug', 'fanart'])
def browse_seasons(slug, fanart):
seasons = trakt_api.get_seasons(slug)
info = {}
if TOKEN:
progress = trakt_api.get_show_progress(slug, cached=_SALTS.get_setting('cache_watched') == 'true')
info = utils.make_seasons_info(progress)
totalItems = len(seasons)
for season in seasons:
if _SALTS.get_setting('show_season0') == 'true' or season['number'] != 0:
liz = make_season_item(season, info.get(str(season['number']), {'season': season['number']}), slug, fanart)
queries = {'mode': MODES.EPISODES, 'slug': slug, 'season': season['number']}
liz_url = _SALTS.build_plugin_url(queries)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=True, totalItems=totalItems)
utils.set_view(CONTENT_TYPES.SEASONS, False)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
@url_dispatcher.register(MODES.EPISODES, ['slug', 'season'])
def browse_episodes(slug, season):
show = trakt_api.get_show_details(slug)
episodes = trakt_api.get_episodes(slug, season)
if TOKEN:
progress = trakt_api.get_show_progress(slug, cached=_SALTS.get_setting('cache_watched') == 'true')
episodes = utils.make_episodes_watched(episodes, progress)
totalItems = len(episodes)
now = time.time()
for episode in episodes:
utc_air_time = utils.iso_2_utc(episode['first_aired'])
if _SALTS.get_setting('show_unaired') == 'true' or utc_air_time <= now:
if _SALTS.get_setting('show_unknown') == 'true' or utc_air_time:
liz, liz_url = make_episode_item(show, episode)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=(liz.getProperty('isPlayable') != 'true'), totalItems=totalItems)
utils.set_view(CONTENT_TYPES.EPISODES, False)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
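# Core source-scraping entry point: run every relevant scraper (serially or via workers),
# collect and filter the hosters, then auto-play, show a source dialog or build a source
# directory depending on the mode and settings.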
@url_dispatcher.register(MODES.GET_SOURCES, ['mode', 'video_type', 'title', 'year', 'slug'], ['season', 'episode', 'ep_title', 'dialog'])
@url_dispatcher.register(MODES.SELECT_SOURCE, ['mode', 'video_type', 'title', 'year', 'slug'], ['season', 'episode', 'ep_title'])
@url_dispatcher.register(MODES.DOWNLOAD_SOURCE, ['mode', 'video_type', 'title', 'year', 'slug'], ['season', 'episode', 'ep_title'])
def get_sources(mode, video_type, title, year, slug, season='', episode='', ep_title='', dialog=None):
timeout = max_timeout = int(_SALTS.get_setting('source_timeout'))
if max_timeout == 0: timeout = None
max_results = int(_SALTS.get_setting('source_results'))
worker_count = 0
hosters = []
workers = []
video = ScraperVideo(video_type, title, year, slug, season, episode, ep_title)
if utils.P_MODE != P_MODES.NONE: q = utils.Queue()
begin = time.time()
fails = {}
got_timeouts = False
for cls in utils.relevant_scrapers(video_type):
if utils.P_MODE == P_MODES.NONE:
hosters += cls(max_timeout).get_sources(video)
if max_results > 0 and len(hosters) >= max_results:
break
else:
worker = utils.start_worker(q, utils.parallel_get_sources, [cls, video])
utils.increment_setting('%s_try' % (cls.get_name()))
worker_count += 1
workers.append(worker)
fails[cls.get_name()] = True
# collect results from workers
if utils.P_MODE != P_MODES.NONE:
while worker_count > 0:
try:
log_utils.log('Calling get with timeout: %s' % (timeout), xbmc.LOGDEBUG)
result = q.get(True, timeout)
log_utils.log('Got %s Source Results' % (len(result['hosters'])), xbmc.LOGDEBUG)
worker_count -= 1
hosters += result['hosters']
del fails[result['name']]
if max_timeout > 0:
timeout = max_timeout - (time.time() - begin)
if timeout < 0: timeout = 0
except utils.Empty:
log_utils.log('Get Sources Process Timeout', xbmc.LOGWARNING)
utils.record_timeouts(fails)
got_timeouts = True
break
if max_results > 0 and len(hosters) >= max_results:
log_utils.log('Exceeded max results: %s/%s' % (max_results, len(hosters)))
break
else:
got_timeouts = False
log_utils.log('All source results received')
total = len(workers)
timeouts = len(fails)
workers = utils.reap_workers(workers)
try:
timeout_msg = 'Scraper Timeouts: %s/%s' % (timeouts, total) if got_timeouts and timeouts else ''
if not hosters:
log_utils.log('No Sources found for: |%s|' % (video))
msg = ' (%s)' % timeout_msg if timeout_msg else ''
builtin = 'XBMC.Notification(%s,No Sources Found%s, 5000, %s)'
xbmc.executebuiltin(builtin % (_SALTS.get_name(), msg, ICON_PATH))
return False
if timeout_msg:
builtin = 'XBMC.Notification(%s,%s, 5000, %s)'
xbmc.executebuiltin(builtin % (_SALTS.get_name(), timeout_msg, ICON_PATH))
hosters = utils.filter_exclusions(hosters)
hosters = utils.filter_quality(video_type, hosters)
if _SALTS.get_setting('enable_sort') == 'true':
if _SALTS.get_setting('filter-unknown') == 'true':
hosters = utils.filter_unknown_hosters(hosters)
SORT_KEYS['source'] = utils.make_source_sort_key()
hosters.sort(key=utils.get_sort_key)
global urlresolver
import urlresolver
hosters = filter_unusable_hosters(hosters)
if not hosters:
log_utils.log('No Useable Sources found for: |%s|' % (video))
msg = ' (%s)' % timeout_msg if timeout_msg else ''
builtin = 'XBMC.Notification(%s,No Useable Sources Found%s, 5000, %s)'
xbmc.executebuiltin(builtin % (_SALTS.get_name(), msg, ICON_PATH))
return False
pseudo_tv = xbmcgui.Window(10000).getProperty('PseudoTVRunning')
if pseudo_tv == 'True' or (mode == MODES.GET_SOURCES and _SALTS.get_setting('auto-play') == 'true'):
auto_play_sources(hosters, video_type, slug, season, episode)
else:
if dialog or (dialog is None and _SALTS.get_setting('source-win') == 'Dialog'):
stream_url = pick_source_dialog(hosters)
return play_source(mode, stream_url, video_type, slug, season, episode)
else:
pick_source_dir(mode, hosters, video_type, slug, season, episode)
finally:
utils.reap_workers(workers, None)
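# Drop non-direct sources whose host urlresolver cannot handle; only the first
# 'filter_unusable' entries are validated to keep the check cheap.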
def filter_unusable_hosters(hosters):
filtered_hosters = []
filter_max = int(_SALTS.get_setting('filter_unusable'))
for i, hoster in enumerate(hosters):
if i < filter_max and 'direct' in hoster and hoster['direct'] == False:
hmf = urlresolver.HostedMediaFile(host=hoster['host'], media_id='dummy') # use dummy media_id to force host validation
if not hmf:
log_utils.log('Unusable source %s (%s) from %s' % (hoster['url'], hoster['host'], hoster['class'].get_name()), xbmc.LOGINFO)
continue
filtered_hosters.append(hoster)
return filtered_hosters
@url_dispatcher.register(MODES.RESOLVE_SOURCE, ['mode', 'class_url', 'video_type', 'slug', 'class_name'], ['season', 'episode'])
@url_dispatcher.register(MODES.DIRECT_DOWNLOAD, ['mode', 'class_url', 'video_type', 'slug', 'class_name'], ['season', 'episode'])
def resolve_source(mode, class_url, video_type, slug, class_name, season='', episode=''):
for cls in utils.relevant_scrapers(video_type):
if cls.get_name() == class_name:
scraper_instance = cls()
break
else:
log_utils.log('Unable to locate scraper with name: %s' % (class_name))
return False
hoster_url = scraper_instance.resolve_link(class_url)
if mode == MODES.DIRECT_DOWNLOAD:
_SALTS.end_of_directory()
return play_source(mode, hoster_url, video_type, slug, season, episode)
@url_dispatcher.register(MODES.PLAY_TRAILER, ['stream_url'])
def play_trailer(stream_url):
xbmc.Player().play(stream_url)
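# Look up episode subtitles via SRT_Scraper in the configured language, let the user pick one
# when several are found, and return the path of the downloaded .srt.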
def download_subtitles(language, title, year, season, episode):
srt_scraper = SRT_Scraper()
tvshow_id = srt_scraper.get_tvshow_id(title, year)
if tvshow_id is None:
return
subs = srt_scraper.get_episode_subtitles(language, tvshow_id, season, episode)
sub_labels = []
for sub in subs:
sub_labels.append(utils.format_sub_label(sub))
index = 0
if len(sub_labels) > 1:
dialog = xbmcgui.Dialog()
index = dialog.select('Choose a subtitle to download', sub_labels)
if subs and index > -1:
return srt_scraper.download_subtitle(subs[index]['url'])
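# Resolve a hoster url with urlresolver and either download it or hand it to Kodi via
# setResolvedUrl, wiring up trakt resume points, metadata/artwork and optional subtitles.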
def play_source(mode, hoster_url, video_type, slug, season='', episode=''):
global urlresolver
import urlresolver
if hoster_url is None:
return False
hmf = urlresolver.HostedMediaFile(url=hoster_url)
if not hmf:
log_utils.log('hoster_url not supported by urlresolver: %s' % (hoster_url))
stream_url = hoster_url
else:
stream_url = hmf.resolve()
if not stream_url or not isinstance(stream_url, basestring):
try: msg = stream_url.msg
except: msg = hoster_url
builtin = 'XBMC.Notification(%s,Link Resolve Failed: %s, 7500, %s)'
xbmc.executebuiltin(builtin % (_SALTS.get_name(), msg, ICON_PATH))
return False
resume_point = 0
if mode not in [MODES.DOWNLOAD_SOURCE, MODES.DIRECT_DOWNLOAD]:
if utils.bookmark_exists(slug, season, episode):
if utils.get_resume_choice(slug, season, episode):
resume_point = utils.get_bookmark(slug, season, episode)
log_utils.log('Resume Point: %s' % (resume_point), xbmc.LOGDEBUG)
try:
win = xbmcgui.Window(10000)
win.setProperty('salts.playing', 'True')
win.setProperty('salts.playing.slug', slug)
win.setProperty('salts.playing.season', str(season))
win.setProperty('salts.playing.episode', str(episode))
if _SALTS.get_setting('trakt_bookmark') == 'true':
win.setProperty('salts.playing.trakt_resume', str(resume_point))
art = {'thumb': '', 'fanart': ''}
info = {}
show_meta = {}
if video_type == VIDEO_TYPES.EPISODE:
path = _SALTS.get_setting('tv-download-folder')
file_name = utils.filename_from_title(slug, VIDEO_TYPES.TVSHOW)
file_name = file_name % ('%02d' % int(season), '%02d' % int(episode))
ep_meta = trakt_api.get_episode_details(slug, season, episode)
show_meta = trakt_api.get_show_details(slug)
people = trakt_api.get_people(SECTIONS.TV, slug) if _SALTS.get_setting('include_people') == 'true' else None
info = utils.make_info(ep_meta, show_meta, people)
images = {}
images['images'] = show_meta['images']
images['images'].update(ep_meta['images'])
art = utils.make_art(images)
path = make_path(path, VIDEO_TYPES.TVSHOW, show_meta['title'], season=season)
file_name = utils.filename_from_title(show_meta['title'], VIDEO_TYPES.TVSHOW)
file_name = file_name % ('%02d' % int(season), '%02d' % int(episode))
else:
path = _SALTS.get_setting('movie-download-folder')
file_name = utils.filename_from_title(slug, video_type)
item = trakt_api.get_movie_details(slug)
people = trakt_api.get_people(SECTIONS.MOVIES, slug) if _SALTS.get_setting('include_people') == 'true' else None
info = utils.make_info(item, people=people)
art = utils.make_art(item)
path = make_path(path, video_type, item['title'], item['year'])
file_name = utils.filename_from_title(item['title'], video_type, item['year'])
except TransientTraktError as e:
log_utils.log('During Playback: %s' % (str(e)), xbmc.LOGWARNING) # just log warning if trakt calls fail and leave meta and art blank
if mode in [MODES.DOWNLOAD_SOURCE, MODES.DIRECT_DOWNLOAD]:
utils.download_media(stream_url, path, file_name)
return True
if video_type == VIDEO_TYPES.EPISODE and utils.srt_download_enabled() and show_meta:
srt_path = download_subtitles(_SALTS.get_setting('subtitle-lang'), show_meta['title'], show_meta['year'], season, episode)
if utils.srt_show_enabled() and srt_path:
log_utils.log('Setting srt path: %s' % (srt_path), xbmc.LOGDEBUG)
win.setProperty('salts.playing.srt', srt_path)
listitem = xbmcgui.ListItem(path=stream_url, iconImage=art['thumb'], thumbnailImage=art['thumb'])
if _SALTS.get_setting('trakt_bookmark') != 'true':
listitem.setProperty('ResumeTime', str(resume_point))
listitem.setProperty('Totaltime', str(99999)) # dummy value to force resume to work
listitem.setProperty('fanart_image', art['fanart'])
try: listitem.setArt(art)
except: pass
listitem.setProperty('IsPlayable', 'true')
listitem.setPath(stream_url)
listitem.setInfo('video', info)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, listitem)
return True
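# Try each (non multi-part) hoster in order until one resolves and plays; notify if all fail.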
def auto_play_sources(hosters, video_type, slug, season, episode):
for item in hosters:
if item['multi-part']:
continue
hoster_url = item['class'].resolve_link(item['url'])
log_utils.log('Auto Playing: %s' % (hoster_url), xbmc.LOGDEBUG)
if play_source(MODES.GET_SOURCES, hoster_url, video_type, slug, season, episode):
return True
else:
msg = 'All sources failed to play'
log_utils.log(msg, xbmc.LOGERROR)
builtin = 'XBMC.Notification(%s,%s, 5000, %s)'
xbmc.executebuiltin(builtin % (_SALTS.get_name(), msg, ICON_PATH))
def pick_source_dialog(hosters):
    # build the list of selectable sources up front; multi-part sources are skipped, so the
    # dialog index is looked up in this filtered list rather than the raw hosters list
    sources = []
    for item in hosters:
        if item['multi-part']:
            continue
        label = item['class'].format_source_label(item)
        label = '[%s] %s' % (item['class'].get_name(), label)
        item['label'] = label
        sources.append(item)
    dialog = xbmcgui.Dialog()
    index = dialog.select('Choose your stream', [item['label'] for item in sources])
    if index > -1:
        try:
            if sources[index]['url']:
                hoster_url = sources[index]['class'].resolve_link(sources[index]['url'])
                log_utils.log('Attempting to play url: %s' % hoster_url)
                return hoster_url
        except Exception as e:
            log_utils.log('Error (%s) while trying to resolve %s' % (str(e), sources[index]['url']), xbmc.LOGERROR)
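# Present the sources as a Kodi directory; selecting an entry resolves and plays it, or
# direct-downloads it when invoked in download mode.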
def pick_source_dir(mode, hosters, video_type, slug, season='', episode=''):
if mode == MODES.DOWNLOAD_SOURCE:
next_mode = MODES.DIRECT_DOWNLOAD
folder = True
else:
next_mode = MODES.RESOLVE_SOURCE
folder = False
hosters_len = len(hosters)
for item in hosters:
if item['multi-part']:
continue
label = item['class'].format_source_label(item)
label = '[%s] %s' % (item['class'].get_name(), label)
item['label'] = label
# log_utils.log(item, xbmc.LOGDEBUG)
queries = {'mode': next_mode, 'class_url': item['url'], 'video_type': video_type, 'slug': slug, 'season': season, 'episode': episode, 'class_name': item['class'].get_name()}
_SALTS.add_directory(queries, infolabels={'title': item['label']}, is_folder=folder, img='', fanart='', total_items=hosters_len)
_SALTS.end_of_directory()
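# Show each scraper's related url for a video (gathered serially or in parallel) and let the
# user either type a replacement or search the scraper and pick one of its results.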
@url_dispatcher.register(MODES.SET_URL_MANUAL, ['mode', 'video_type', 'title', 'year', 'slug'], ['season', 'episode', 'ep_title'])
@url_dispatcher.register(MODES.SET_URL_SEARCH, ['mode', 'video_type', 'title', 'year', 'slug'], ['season', 'episode', 'ep_title'])
def set_related_url(mode, video_type, title, year, slug, season='', episode='', ep_title=''):
related_list = []
timeout = max_timeout = int(_SALTS.get_setting('source_timeout'))
if max_timeout == 0: timeout = None
worker_count = 0
workers = []
if utils.P_MODE != P_MODES.NONE: q = utils.Queue()
begin = time.time()
video = ScraperVideo(video_type, title, year, slug, season, episode, ep_title)
for cls in utils.relevant_scrapers(video_type, order_matters=True):
if utils.P_MODE == P_MODES.NONE:
related = {}
related['class'] = cls(max_timeout)
url = related['class'].get_url(video)
if not url: url = ''
related['url'] = url
related['name'] = related['class'].get_name()
related['label'] = '[%s] %s' % (related['name'], related['url'])
related_list.append(related)
else:
worker = utils.start_worker(q, utils.parallel_get_url, [cls, video])
utils.increment_setting('%s_try' % (cls.get_name()))
worker_count += 1
workers.append(worker)
related = {'class': cls(max_timeout), 'name': cls.get_name(), 'label': '[%s]' % (cls.get_name()), 'url': ''}
related_list.append(related)
# collect results from workers
if utils.P_MODE != P_MODES.NONE:
fails = dict.fromkeys([item['name'] for item in related_list], True)
while worker_count > 0:
try:
log_utils.log('Calling get with timeout: %s' % (timeout), xbmc.LOGDEBUG)
result = q.get(True, timeout)
log_utils.log('Got result: %s' % (result), xbmc.LOGDEBUG)
# related_list.append(result)
for i, item in enumerate(related_list):
if item['name'] == result['name']:
related_list[i] = result
del fails[result['name']]
worker_count -= 1
if max_timeout > 0:
timeout = max_timeout - (time.time() - begin)
if timeout < 0: timeout = 0
except utils.Empty:
log_utils.log('Get Url Timeout', xbmc.LOGWARNING)
utils.record_timeouts(fails)
break
else:
log_utils.log('All source results received')
total = len(workers)
timeouts = len(fails)
timeout_msg = 'Scraper Timeouts: %s/%s' % (timeouts, total) if timeouts else ''
if timeout_msg:
builtin = 'XBMC.Notification(%s,%s, 5000, %s)'
xbmc.executebuiltin(builtin % (_SALTS.get_name(), timeout_msg, ICON_PATH))
for related in related_list:
if related['name'] in fails:
related['label'] = '[COLOR darkred]%s[/COLOR]' % (related['label'])
workers = utils.reap_workers(workers)
try:
dialog = xbmcgui.Dialog()
index = dialog.select('Url To Change (%s)' % (video_type), [related['label'] for related in related_list])
if index > -1:
if mode == MODES.SET_URL_MANUAL:
keyboard = xbmc.Keyboard()
keyboard.setHeading('Related %s url at %s' % (video_type, related_list[index]['name']))
keyboard.setDefault(related_list[index]['url'])
keyboard.doModal()
if keyboard.isConfirmed():
new_url = keyboard.getText()
utils.update_url(video_type, title, year, related_list[index]['name'], related_list[index]['url'], new_url, season, episode)
builtin = 'XBMC.Notification(%s,[COLOR blue]%s[/COLOR] Related Url Set, 5000, %s)'
xbmc.executebuiltin(builtin % (_SALTS.get_name(), related_list[index]['name'], ICON_PATH))
elif mode == MODES.SET_URL_SEARCH:
temp_title = title
temp_year = year
while True:
dialog = xbmcgui.Dialog()
choices = ['Manual Search']
try:
log_utils.log('Searching for: |%s|%s|' % (temp_title, temp_year), xbmc.LOGDEBUG)
results = related_list[index]['class'].search(video_type, temp_title, temp_year)
for result in results:
choice = result['title']
if result['year']: choice = '%s (%s)' % (choice, result['year'])
choices.append(choice)
results_index = dialog.select('Select Related', choices)
if results_index == 0:
keyboard = xbmc.Keyboard()
keyboard.setHeading('Enter Search')
text = temp_title
if temp_year: text = '%s (%s)' % (text, temp_year)
keyboard.setDefault(text)
keyboard.doModal()
if keyboard.isConfirmed():
match = re.match('([^\(]+)\s*\(*(\d{4})?\)*', keyboard.getText())
temp_title = match.group(1).strip()
temp_year = match.group(2) if match.group(2) else ''
elif results_index > 0:
utils.update_url(video_type, title, year, related_list[index]['name'], related_list[index]['url'], results[results_index - 1]['url'], season, episode)
builtin = 'XBMC.Notification(%s,[COLOR blue]%s[/COLOR] Related Url Set, 5000, %s)'
xbmc.executebuiltin(builtin % (_SALTS.get_name(), related_list[index]['name'], ICON_PATH))
break
else:
break
except NotImplementedError:
log_utils.log('%s Scraper does not support searching.' % (related_list[index]['class'].get_name()))
builtin = 'XBMC.Notification(%s,%s Scraper does not support searching, 5000, %s)'
xbmc.executebuiltin(builtin % (_SALTS.get_name(), related_list[index]['class'].get_name(), ICON_PATH))
break
finally:
utils.reap_workers(workers, None)
@url_dispatcher.register(MODES.RATE, ['section', 'id_type', 'show_id'], ['season', 'episode'])
def rate_media(section, id_type, show_id, season='', episode=''):
# disabled until fixes for rating are made in official addon
if False and xbmc.getCondVisibility('System.HasAddon(script.trakt)'):
run = 'RunScript(script.trakt, action=rate, media_type=%s, remoteid=%s'
if section == SECTIONS.MOVIES:
run = (run + ')') % ('movie', show_id)
else:
if season and episode:
run = (run + ', season=%s, episode=%s)') % ('episode', show_id, season, episode)
else:
run = (run + ')') % ('show', show_id)
xbmc.executebuiltin(run)
else:
item = {id_type: show_id}
keyboard = xbmc.Keyboard()
keyboard.setHeading('Enter Rating (unrate, or 1-10)')
while True:
keyboard.doModal()
if keyboard.isConfirmed():
rating = keyboard.getText()
rating = rating.lower()
if rating in ['unrate'] + [str(i) for i in range(1, 11)]:
break
else:
return
if rating == 'unrate': rating = None
trakt_api.rate(section, item, rating, season, episode)
@url_dispatcher.register(MODES.EDIT_TVSHOW_ID, ['title'], ['year'])
def edit_tvshow_id(title, year=''):
srt_scraper = SRT_Scraper()
tvshow_id = srt_scraper.get_tvshow_id(title, year)
keyboard = xbmc.Keyboard()
keyboard.setHeading('Input TVShow ID')
if tvshow_id:
keyboard.setDefault(str(tvshow_id))
keyboard.doModal()
if keyboard.isConfirmed():
db_connection.set_related_url(VIDEO_TYPES.TVSHOW, title, year, SRT_SOURCE, keyboard.getText())
@url_dispatcher.register(MODES.REM_FROM_LIST, ['slug', 'section', 'id_type', 'show_id'])
def remove_from_list(slug, section, id_type, show_id):
item = {'type': TRAKT_SECTIONS[section][:-1], id_type: show_id}
remove_many_from_list(section, item, slug)
xbmc.executebuiltin("XBMC.Container.Refresh")
def remove_many_from_list(section, items, slug):
if slug == utils.WATCHLIST_SLUG:
response = trakt_api.remove_from_watchlist(section, items)
else:
response = trakt_api.remove_from_list(section, slug, items)
return response
@url_dispatcher.register(MODES.ADD_TO_COLL, ['mode', 'section', 'id_type', 'show_id'])
@url_dispatcher.register(MODES.REM_FROM_COLL, ['mode', 'section', 'id_type', 'show_id'])
def manage_collection(mode, section, id_type, show_id):
item = {id_type: show_id}
if mode == MODES.ADD_TO_COLL:
trakt_api.add_to_collection(section, item)
msg = 'Item Added to Collection'
else:
trakt_api.remove_from_collection(section, item)
msg = 'Item Removed from Collection'
builtin = "XBMC.Notification(%s,%s, 2000, %s)" % (_SALTS.get_name(), msg, ICON_PATH)
xbmc.executebuiltin(builtin)
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.ADD_TO_LIST, ['section', 'id_type', 'show_id'], ['slug'])
def add_to_list(section, id_type, show_id, slug=None):
add_many_to_list(section, {id_type: show_id}, slug)
builtin = "XBMC.Notification(%s,Item Added to List, 2000, %s)" % (_SALTS.get_name(), ICON_PATH)
xbmc.executebuiltin(builtin)
xbmc.executebuiltin("XBMC.Container.Refresh")
def add_many_to_list(section, items, slug=None):
    response = None
    if not slug: slug = utils.choose_list()
    if slug == utils.WATCHLIST_SLUG:
        response = trakt_api.add_to_watchlist(section, items)
    elif slug:
        response = trakt_api.add_to_list(section, slug, items)
    return response
@url_dispatcher.register(MODES.COPY_LIST, ['section', 'slug'], ['username', 'target_slug'])
def copy_list(section, slug, username=None, target_slug=None):
if slug == COLLECTION_SLUG:
items = trakt_api.get_collection(section)
else:
items = trakt_api.show_list(slug, section, username)
copy_items = []
for item in items:
query = utils.show_id(item)
copy_item = {'type': TRAKT_SECTIONS[section][:-1], query['id_type']: query['show_id']}
copy_items.append(copy_item)
response = add_many_to_list(section, copy_items, target_slug)
builtin = "XBMC.Notification(%s,List Copied: (A:%s/ E:%s/ S:%s), 5000, %s)" % (_SALTS.get_name(), response['inserted'], response['already_exist'], response['skipped'], ICON_PATH)
xbmc.executebuiltin(builtin)
@url_dispatcher.register(MODES.TOGGLE_TITLE, ['slug'])
def toggle_title(slug):
filter_list = utils.get_force_title_list()
if slug in filter_list:
del filter_list[filter_list.index(slug)]
else:
filter_list.append(slug)
filter_str = '|'.join(filter_list)
_SALTS.set_setting('force_title_match', filter_str)
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.TOGGLE_WATCHED, ['section', 'id_type', 'show_id'], ['watched', 'season', 'episode'])
def toggle_watched(section, id_type, show_id, watched=True, season='', episode=''):
log_utils.log('In Watched: |%s|%s|%s|%s|%s|%s|' % (section, id_type, show_id, season, episode, watched), xbmc.LOGDEBUG)
item = {id_type: show_id}
trakt_api.set_watched(section, item, season, episode, watched)
w_str = 'Watched' if watched else 'Unwatched'
builtin = "XBMC.Notification(%s,Marked as %s,5000,%s)" % (_SALTS.get_name(), w_str, ICON_PATH)
xbmc.executebuiltin(builtin)
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.URL_EXISTS, ['slug'])
def toggle_url_exists(slug):
show_str = _SALTS.get_setting('exists_list')
if show_str:
show_list = show_str.split('|')
else:
show_list = []
if slug in show_list:
show_list.remove(slug)
else:
show_list.append(slug)
show_str = '|'.join(show_list)
_SALTS.set_setting('exists_list', show_str)
xbmc.executebuiltin("XBMC.Container.Refresh")
@url_dispatcher.register(MODES.UPDATE_SUBS)
def update_subscriptions():
log_utils.log('Updating Subscriptions', xbmc.LOGDEBUG)
dialog = None
if _SALTS.get_setting(MODES.UPDATE_SUBS + '-notify') == 'true':
dialog = xbmcgui.DialogProgressBG()
dialog.create('Stream All The Sources', 'Updating Subscriptions...')
dialog.update(0)
update_strms(SECTIONS.TV, dialog)
if _SALTS.get_setting('include_movies') == 'true':
update_strms(SECTIONS.MOVIES, dialog)
if _SALTS.get_setting('library-update') == 'true':
xbmc.executebuiltin('UpdateLibrary(video)')
if _SALTS.get_setting('cleanup-subscriptions') == 'true':
clean_subs()
now = datetime.datetime.now()
db_connection.set_setting('%s-last_run' % MODES.UPDATE_SUBS, now.strftime("%Y-%m-%d %H:%M:%S.%f"))
if _SALTS.get_setting(MODES.UPDATE_SUBS + '-notify') == 'true':
dialog.close()
if _SALTS.get_setting('auto-' + MODES.UPDATE_SUBS) == 'true':
builtin = "XBMC.Notification(%s,Next Update in %0.1f hours,5000, %s)" % (_SALTS.get_name(), float(_SALTS.get_setting(MODES.UPDATE_SUBS + '-interval')), ICON_PATH)
xbmc.executebuiltin(builtin)
xbmc.executebuiltin("XBMC.Container.Refresh")
def update_strms(section, dialog=None):
section_params = utils.get_section_params(section)
slug = _SALTS.get_setting('%s_sub_slug' % (section))
if not slug:
return
elif slug == utils.WATCHLIST_SLUG:
items = trakt_api.show_watchlist(section)
else:
items = trakt_api.show_list(slug, section)
length = len(items)
for i, item in enumerate(items):
if dialog:
percent_progress = i * 100 / length
dialog.update(percent_progress, 'Stream All The Sources', 'Updating %s: %s (%s)' % (section, re.sub(' \(\d{4}\)$', '', item['title']), item['year']))
add_to_library(section_params['video_type'], item['title'], item['year'], item['ids']['slug'])
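# Remove ended or cancelled shows from the TV subscription list (or watchlist).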
@url_dispatcher.register(MODES.CLEAN_SUBS)
def clean_subs():
slug = _SALTS.get_setting('TV_sub_slug')
if not slug:
return
elif slug == utils.WATCHLIST_SLUG:
items = trakt_api.show_watchlist(SECTIONS.TV)
else:
items = trakt_api.show_list(slug, SECTIONS.TV)
del_items = []
for item in items:
show_slug = item['ids']['slug']
show = trakt_api.get_show_details(show_slug)
if show['status'].upper() in ['ENDED', 'CANCELED', 'CANCELLED']:
show_id = utils.show_id(item)
del_items.append({show_id['id_type']: show_id['show_id']})
if del_items:
if slug == utils.WATCHLIST_SLUG:
trakt_api.remove_from_watchlist(SECTIONS.TV, del_items)
else:
trakt_api.remove_from_list(SECTIONS.TV, slug, del_items)
@url_dispatcher.register(MODES.FLUSH_CACHE)
def flush_cache():
dlg = xbmcgui.Dialog()
ln1 = 'Are you sure you want to delete the url cache?'
ln2 = 'This will slow things down until all urls are re-cached'
ln3 = ''
yes = 'Keep'
no = 'Delete'
if dlg.yesno('Flush web cache', ln1, ln2, ln3, yes, no):
db_connection.flush_cache()
@url_dispatcher.register(MODES.RESET_DB)
def reset_db():
if db_connection.reset_db():
message = 'DB Reset Successful'
else:
message = 'Reset only allowed on sqlite DBs'
builtin = "XBMC.Notification(PrimeWire,%s,2000, %s)" % (message, ICON_PATH)
xbmc.executebuiltin(builtin)
@url_dispatcher.register(MODES.EXPORT_DB)
def export_db():
try:
dialog = xbmcgui.Dialog()
export_path = dialog.browse(0, 'Select Export Directory', 'files')
if export_path:
export_path = xbmc.translatePath(export_path)
keyboard = xbmc.Keyboard('export.csv', 'Enter Export Filename')
keyboard.doModal()
if keyboard.isConfirmed():
export_filename = keyboard.getText()
export_file = export_path + export_filename
db_connection.export_from_db(export_file)
builtin = "XBMC.Notification(Export Successful,Exported to %s,2000, %s)" % (export_file, ICON_PATH)
xbmc.executebuiltin(builtin)
except Exception as e:
log_utils.log('Export Failed: %s' % (e), xbmc.LOGERROR)
builtin = "XBMC.Notification(Export,Export Failed,2000, %s)" % (ICON_PATH)
xbmc.executebuiltin(builtin)
@url_dispatcher.register(MODES.IMPORT_DB)
def import_db():
try:
dialog = xbmcgui.Dialog()
import_file = dialog.browse(1, 'Select Import File', 'files')
if import_file:
import_file = xbmc.translatePath(import_file)
db_connection.import_into_db(import_file)
builtin = "XBMC.Notification(Import Success,Imported from %s,5000, %s)" % (import_file, ICON_PATH)
xbmc.executebuiltin(builtin)
except Exception as e:
log_utils.log('Import Failed: %s' % (e), xbmc.LOGERROR)
builtin = "XBMC.Notification(Import,Import Failed,2000, %s)" % (ICON_PATH)
xbmc.executebuiltin(builtin)
raise
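# Create library .strm files: for a tv show, one per episode of every included season (subject
# to the aired/unknown/specials settings); for a movie, a single file in the movie folder.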
@url_dispatcher.register(MODES.ADD_TO_LIBRARY, ['video_type', 'title', 'year', 'slug'])
def add_to_library(video_type, title, year, slug):
log_utils.log('Creating .strm for |%s|%s|%s|%s|' % (video_type, title, year, slug), xbmc.LOGDEBUG)
if video_type == VIDEO_TYPES.TVSHOW:
save_path = _SALTS.get_setting('tvshow-folder')
save_path = xbmc.translatePath(save_path)
show = trakt_api.get_show_details(slug)
show['title'] = re.sub(' \(\d{4}\)$', '', show['title']) # strip off year if it's part of show title
seasons = trakt_api.get_seasons(slug)
include_unknown = _SALTS.get_setting('include_unknown') == 'true'
if not seasons:
log_utils.log('No Seasons found for %s (%s)' % (show['title'], show['year']), xbmc.LOGERROR)
for season in seasons:
season_num = season['number']
if _SALTS.get_setting('include_specials') == 'true' or season_num != 0:
episodes = trakt_api.get_episodes(slug, season_num)
for episode in episodes:
if utils.show_requires_source(slug):
require_source = True
else:
if (episode['first_aired'] != None and utils.iso_2_utc(episode['first_aired']) <= time.time()) or (include_unknown and episode['first_aired'] == None):
require_source = False
else:
continue
ep_num = episode['number']
filename = utils.filename_from_title(show['title'], video_type)
filename = filename % ('%02d' % int(season_num), '%02d' % int(ep_num))
final_path = os.path.join(make_path(save_path, video_type, show['title'], season=season_num), filename)
strm_string = _SALTS.build_plugin_url({'mode': MODES.GET_SOURCES, 'video_type': VIDEO_TYPES.EPISODE, 'title': title, 'year': year, 'season': season_num,
'episode': ep_num, 'slug': slug, 'ep_title': episode['title'], 'dialog': True})
write_strm(strm_string, final_path, VIDEO_TYPES.EPISODE, show['title'], show['year'], slug, season_num, ep_num, require_source=require_source)
elif video_type == VIDEO_TYPES.MOVIE:
save_path = _SALTS.get_setting('movie-folder')
save_path = xbmc.translatePath(save_path)
strm_string = _SALTS.build_plugin_url({'mode': MODES.GET_SOURCES, 'video_type': video_type, 'title': title, 'year': year, 'slug': slug, 'dialog': True})
filename = utils.filename_from_title(title, VIDEO_TYPES.MOVIE, year)
final_path = os.path.join(make_path(save_path, video_type, title, year), filename)
write_strm(strm_string, final_path, VIDEO_TYPES.MOVIE, title, year, slug, require_source=_SALTS.get_setting('require_source') == 'true')
def make_path(base_path, video_type, title, year='', season=''):
path = base_path
if video_type == VIDEO_TYPES.TVSHOW:
show_folder = re.sub(r'([^\w\-_\. ]|\.$)', '_', title)
path = os.path.join(base_path, show_folder, 'Season %s' % (season))
else:
dir_name = title if not year else '%s (%s)' % (title, year)
path = os.path.join(base_path, dir_name)
return path
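# Write the plugin url into a .strm file, creating folders as needed; only rewrite when the
# contents changed and, if require_source is set, only when some scraper has a url for it.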
def write_strm(stream, path, video_type, title, year, slug, season='', episode='', require_source=False):
path = xbmc.makeLegalFilename(path)
if not xbmcvfs.exists(os.path.dirname(path)):
try:
try: xbmcvfs.mkdirs(os.path.dirname(path))
except: os.mkdir(os.path.dirname(path))
except Exception as e:
            log_utils.log('Failed to create directory %s: %s' % (os.path.dirname(path), str(e)), xbmc.LOGERROR)
old_strm_string = ''
try:
f = xbmcvfs.File(path, 'r')
old_strm_string = f.read()
f.close()
except: pass
# print "Old String: %s; New String %s" %(old_strm_string,strm_string)
# string will be blank if file doesn't exist or is blank
if stream != old_strm_string:
try:
if not require_source or utils.url_exists(ScraperVideo(video_type, title, year, slug, season, episode)):
log_utils.log('Writing strm: %s' % stream)
file_desc = xbmcvfs.File(path, 'w')
file_desc.write(stream)
file_desc.close()
else:
log_utils.log('No strm written for |%s|%s|%s|%s|%s|' % (video_type, title, year, season, episode), xbmc.LOGWARNING)
except Exception as e:
log_utils.log('Failed to create .strm file (%s): %s' % (path, e), xbmc.LOGERROR)
def show_pickable_list(slug, pick_label, pick_mode, section):
if not slug:
liz = xbmcgui.ListItem(label=pick_label)
liz_url = _SALTS.build_plugin_url({'mode': pick_mode, 'section': section})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=False)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
else:
show_list(section, slug)
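# Render a trakt list as a Kodi directory, decorating each entry with watched/collection
# state and context-menu actions, and appending a next-page item for long lists.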
def make_dir_from_list(section, list_data, slug=None, query=None, page=None):
section_params = utils.get_section_params(section)
totalItems = len(list_data)
cache_watched = _SALTS.get_setting('cache_watched') == 'true'
watched = {}
in_collection = {}
if TOKEN:
watched_history = trakt_api.get_watched(section, cached=cache_watched)
for item in watched_history:
if section == SECTIONS.MOVIES:
watched[item['movie']['ids']['slug']] = item['plays'] > 0
else:
watched[item['show']['ids']['slug']] = len([e for s in item['seasons'] if s['number'] != 0 for e in s['episodes']])
collection = trakt_api.get_collection(section, full=False, cached=_SALTS.get_setting('cache_collection') == 'true')
in_collection = dict.fromkeys([show['ids']['slug'] for show in collection], True)
for show in list_data:
menu_items = []
show_id = utils.show_id(show)
if slug and slug != COLLECTION_SLUG:
queries = {'mode': MODES.REM_FROM_LIST, 'slug': slug, 'section': section}
queries.update(show_id)
menu_items.append(('Remove from List', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
sub_slug = _SALTS.get_setting('%s_sub_slug' % (section))
if TOKEN and sub_slug:
if sub_slug != slug:
queries = {'mode': MODES.ADD_TO_LIST, 'section': section_params['section'], 'slug': sub_slug}
queries.update(show_id)
menu_items.append(('Subscribe', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
elif section == SECTIONS.TV:
show_slug = show['ids']['slug']
if utils.show_requires_source(show_slug):
label = 'Require Aired Only?'
else:
label = 'Require Page Only?'
queries = {'mode': MODES.URL_EXISTS, 'slug': show_slug}
menu_items.append((label, 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
if section == SECTIONS.MOVIES:
show['watched'] = watched.get(show['ids']['slug'], False)
else:
try:
show['watched'] = watched[show['ids']['slug']] >= show['aired_episodes']
show['watched_count'] = watched[show['ids']['slug']]
except: show['watched'] = False
show['in_collection'] = in_collection.get(show['ids']['slug'], False)
liz, liz_url = make_item(section_params, show, menu_items)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=section_params['folder'], totalItems=totalItems)
if query and page and totalItems >= int(_SALTS.get_setting('list_size')):
meta = {'title': 'Next Page >>'}
query['page'] = int(page) + 1
_SALTS.add_directory(query, meta, img=utils.art('nextpage.png'), fanart=utils.art('fanart.jpg'), is_folder=True)
utils.set_view(section_params['content_type'], False)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
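# Render a week of trakt calendar entries with previous/next navigation, watched flags and
# highlighted series premieres.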
def make_dir_from_cal(mode, start_date, days):
try: start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
except TypeError: start_date = datetime.datetime(*(time.strptime(start_date, '%Y-%m-%d')[0:6]))
last_week = start_date - datetime.timedelta(days=7)
next_week = start_date + datetime.timedelta(days=7)
last_str = datetime.datetime.strftime(last_week, '%Y-%m-%d')
next_str = datetime.datetime.strftime(next_week, '%Y-%m-%d')
liz = xbmcgui.ListItem(label='<< Previous Week', iconImage=utils.art('previous.png'), thumbnailImage=utils.art('previous.png'))
liz.setProperty('fanart_image', utils.art('fanart.jpg'))
liz_url = _SALTS.build_plugin_url({'mode': mode, 'start_date': last_str})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=True)
cache_watched = _SALTS.get_setting('cache_watched') == 'true'
watched = {}
if TOKEN:
watched_history = trakt_api.get_watched(SECTIONS.TV, cached=cache_watched)
for item in watched_history:
slug = item['show']['ids']['slug']
watched[slug] = {}
for season in item['seasons']:
watched[slug][season['number']] = {}
for episode in season['episodes']:
watched[slug][season['number']][episode['number']] = True
totalItems = len(days)
for day in sorted(days.items()):
for item in day[1]:
episode = item['episode']
show = item['show']
fanart = show['images']['fanart']['full']
utc_secs = utils.iso_2_utc(episode['first_aired'])
show_date = datetime.date.fromtimestamp(utc_secs)
try: episode['watched'] = watched[show['ids']['slug']][episode['season']][episode['number']]
except: episode['watched'] = False
if show_date < start_date.date():
log_utils.log('Skipping show before start: |%s| before |%s|' % (show_date, start_date.date()), xbmc.LOGDEBUG)
continue
elif show_date > next_week.date():
log_utils.log('Stopping because show after end: |%s| before |%s|' % (show_date, next_week.date()), xbmc.LOGDEBUG)
break
date = utils.make_day(datetime.date.fromtimestamp(utc_secs).isoformat())
if _SALTS.get_setting('calendar_time') != '0':
date_time = '%s@%s' % (date, utils.make_time(utc_secs))
else:
date_time = date
menu_items = []
queries = {'mode': MODES.SEASONS, 'slug': show['ids']['slug'], 'fanart': fanart}
menu_items.append(('Browse Seasons', 'Container.Update(%s)' % (_SALTS.build_plugin_url(queries))),)
liz, liz_url = make_episode_item(show, episode, show_subs=False, menu_items=menu_items)
label = liz.getLabel()
label = '[[COLOR deeppink]%s[/COLOR]] %s - %s' % (date_time, show['title'], label.decode('utf-8', 'replace'))
if episode['season'] == 1 and episode['number'] == 1:
label = '[COLOR green]%s[/COLOR]' % (label)
liz.setLabel(label)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=(liz.getProperty('isPlayable') != 'true'), totalItems=totalItems)
liz = xbmcgui.ListItem(label='Next Week >>', iconImage=utils.art('next.png'), thumbnailImage=utils.art('next.png'))
liz.setProperty('fanart_image', utils.art('fanart.jpg'))
liz_url = _SALTS.build_plugin_url({'mode': mode, 'start_date': next_str})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, liz, isFolder=True)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def make_season_item(season, info, slug, fanart):
label = 'Season %s' % (season['number'])
season['images']['fanart'] = {}
season['images']['fanart']['full'] = fanart
liz = utils.make_list_item(label, season)
log_utils.log('Season Info: %s' % (info), xbmc.LOGDEBUG)
liz.setInfo('video', info)
menu_items = []
if 'playcount' in info and info['playcount']:
watched = False
label = 'Mark as Unwatched'
else:
watched = True
label = 'Mark as Watched'
if TOKEN:
queries = {'mode': MODES.TOGGLE_WATCHED, 'section': SECTIONS.TV, 'season': season['number'], 'id_type': 'slug', 'show_id': slug, 'watched': watched}
menu_items.append((label, 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.SET_VIEW, 'content_type': CONTENT_TYPES.SEASONS}
menu_items.append(('Set as Season View', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
liz.addContextMenuItems(menu_items, replaceItems=True)
return liz
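# Build the ListItem (and plugin url) for a single episode, including unaired highlighting,
# subtitle indicators and the full context menu.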
def make_episode_item(show, episode, show_subs=True, menu_items=None):
log_utils.log('Make Episode: Show: %s, Episode: %s, Show Subs: %s' % (show, episode, show_subs), xbmc.LOGDEBUG)
log_utils.log('Make Episode: Episode: %s' % (episode), xbmc.LOGDEBUG)
if menu_items is None: menu_items = []
folder = _SALTS.get_setting('source-win') == 'Directory' and _SALTS.get_setting('auto-play') == 'false'
show['title'] = re.sub(' \(\d{4}\)$', '', show['title'])
label = '%sx%s %s' % (episode['season'], episode['number'], episode['title'])
    first_aired = episode.get('first_aired')
    utc_air_time = utils.iso_2_utc(first_aired) if first_aired else 0
    try: time_str = time.asctime(time.localtime(utc_air_time))
    except: time_str = 'Unavailable'
    log_utils.log('First Aired: Title: %s S/E: %s/%s fa: %s, utc: %s, local: %s' %
                  (show['title'], episode['season'], episode['number'], first_aired, utc_air_time, time_str), xbmc.LOGDEBUG)
    if _SALTS.get_setting('unaired_indicator') == 'true' and (not first_aired or utc_air_time > time.time()):
        label = '[I][COLOR chocolate]%s[/COLOR][/I]' % (label)
if show_subs and utils.srt_indicators_enabled():
srt_scraper = SRT_Scraper()
language = _SALTS.get_setting('subtitle-lang')
tvshow_id = srt_scraper.get_tvshow_id(show['title'], show['year'])
if tvshow_id is not None:
srts = srt_scraper.get_episode_subtitles(language, tvshow_id, episode['season'], episode['number'])
else:
srts = []
label = utils.format_episode_label(label, episode['season'], episode['number'], srts)
meta = utils.make_info(episode, show)
meta['images'] = show['images']
if episode['images']['screenshot']: meta['images']['thumb'] = episode['images']['screenshot']
liz = utils.make_list_item(label, meta)
if not folder:
liz.setProperty('isPlayable', 'true')
del meta['images']
liz.setInfo('video', meta)
queries = {'mode': MODES.GET_SOURCES, 'video_type': VIDEO_TYPES.EPISODE, 'title': show['title'], 'year': show['year'], 'season': episode['season'], 'episode': episode['number'],
'ep_title': episode['title'], 'slug': show['ids']['slug']}
liz_url = _SALTS.build_plugin_url(queries)
if _SALTS.get_setting('auto-play') == 'true':
queries = {'mode': MODES.SELECT_SOURCE, 'video_type': VIDEO_TYPES.EPISODE, 'title': show['title'], 'year': show['year'], 'season': episode['season'], 'episode': episode['number'],
'ep_title': episode['title'], 'slug': show['ids']['slug']}
if _SALTS.get_setting('source-win') == 'Dialog':
runstring = 'PlayMedia(%s)' % _SALTS.build_plugin_url(queries)
else:
runstring = 'Container.Update(%s)' % _SALTS.build_plugin_url(queries)
menu_items.insert(0, ('Select Source', runstring),)
if _SALTS.get_setting('show_download') == 'true':
queries = {'mode': MODES.DOWNLOAD_SOURCE, 'video_type': VIDEO_TYPES.EPISODE, 'title': show['title'], 'year': show['year'], 'season': episode['season'], 'episode': episode['number'],
'ep_title': episode['title'], 'slug': show['ids']['slug']}
if _SALTS.get_setting('source-win') == 'Dialog':
runstring = 'RunPlugin(%s)' % _SALTS.build_plugin_url(queries)
else:
runstring = 'Container.Update(%s)' % _SALTS.build_plugin_url(queries)
menu_items.append(('Download Source', runstring),)
if menu_items and menu_items[0][0] == 'Select Source':
menu_items.append(('Show Information', 'XBMC.Action(Info)'),)
else:
menu_items.insert(0, ('Show Information', 'XBMC.Action(Info)'),)
show_id = utils.show_id(show)
queries = {'mode': MODES.ADD_TO_LIST, 'section': SECTIONS.TV}
queries.update(show_id)
menu_items.append(('Add Show to List', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
if 'watched' in episode and episode['watched']:
watched = False
label = 'Mark as Unwatched'
else:
watched = True
label = 'Mark as Watched'
if TOKEN:
show_id = utils.show_id(show)
queries = {'mode': MODES.RATE, 'section': SECTIONS.TV, 'season': episode['season'], 'episode': episode['number']}
queries.update(show_id)
menu_items.append(('Rate on trakt.tv', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.TOGGLE_WATCHED, 'section': SECTIONS.TV, 'season': episode['season'], 'episode': episode['number'], 'watched': watched}
queries.update(show_id)
menu_items.append((label, 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.SET_URL_SEARCH, 'video_type': VIDEO_TYPES.TVSHOW, 'title': show['title'], 'year': show['year'], 'slug': show['ids']['slug']}
menu_items.append(('Set Related Show Url (Search)', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.SET_URL_MANUAL, 'video_type': VIDEO_TYPES.EPISODE, 'title': show['title'], 'year': show['year'], 'season': episode['season'],
'episode': episode['number'], 'ep_title': episode['title'], 'slug': show['ids']['slug']}
menu_items.append(('Set Related Url (Manual)', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
liz.addContextMenuItems(menu_items, replaceItems=True)
return liz, liz_url
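# Build the ListItem (and plugin url) for a show or movie, including watched/collection state
# and the full context menu.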
def make_item(section_params, show, menu_items=None):
if menu_items is None: menu_items = []
if not isinstance(show['title'], basestring): show['title'] = ''
show['title'] = re.sub(' \(\d{4}\)$', '', show['title'])
label = '%s (%s)' % (show['title'], show['year'])
liz = utils.make_list_item(label, show)
slug = show['ids']['slug']
liz.setProperty('slug', slug)
people = trakt_api.get_people(section_params['section'], slug) if _SALTS.get_setting('include_people') == 'true' else None
info = utils.make_info(show, people=people)
if not section_params['folder']:
liz.setProperty('IsPlayable', 'true')
if 'TotalEpisodes' in info:
liz.setProperty('TotalEpisodes', str(info['TotalEpisodes']))
liz.setProperty('WatchedEpisodes', str(info['WatchedEpisodes']))
liz.setProperty('UnWatchedEpisodes', str(info['UnWatchedEpisodes']))
if section_params['section'] == SECTIONS.TV:
queries = {'mode': section_params['next_mode'], 'slug': slug, 'fanart': liz.getProperty('fanart_image')}
info['TVShowTitle'] = info['title']
else:
queries = {'mode': section_params['next_mode'], 'video_type': section_params['video_type'], 'title': show['title'], 'year': show['year'], 'slug': slug}
liz.setInfo('video', info)
liz_url = _SALTS.build_plugin_url(queries)
if section_params['next_mode'] == MODES.GET_SOURCES and _SALTS.get_setting('auto-play') == 'true':
queries = {'mode': MODES.SELECT_SOURCE, 'video_type': section_params['video_type'], 'title': show['title'], 'year': show['year'], 'slug': slug}
if _SALTS.get_setting('source-win') == 'Dialog':
runstring = 'PlayMedia(%s)' % _SALTS.build_plugin_url(queries)
else:
runstring = 'Container.Update(%s)' % _SALTS.build_plugin_url(queries)
menu_items.insert(0, ('Select Source', runstring),)
if section_params['next_mode'] == MODES.GET_SOURCES and _SALTS.get_setting('show_download') == 'true':
queries = {'mode': MODES.DOWNLOAD_SOURCE, 'video_type': section_params['video_type'], 'title': show['title'], 'year': show['year'], 'slug': slug}
if _SALTS.get_setting('source-win') == 'Dialog':
runstring = 'RunPlugin(%s)' % _SALTS.build_plugin_url(queries)
else:
runstring = 'Container.Update(%s)' % _SALTS.build_plugin_url(queries)
menu_items.append(('Download Source', runstring),)
if TOKEN:
show_id = utils.show_id(show)
if 'in_collection' in show and show['in_collection']:
queries = {'mode': MODES.REM_FROM_COLL, 'section': section_params['section']}
queries.update(show_id)
menu_items.append(('Remove from Collection', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
else:
queries = {'mode': MODES.ADD_TO_COLL, 'section': section_params['section']}
queries.update(show_id)
menu_items.append(('Add to Collection', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.ADD_TO_LIST, 'section': section_params['section']}
queries.update(show_id)
menu_items.append(('Add to List', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.RATE, 'section': section_params['section']}
queries.update(show_id)
menu_items.append(('Rate on trakt.tv', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.ADD_TO_LIBRARY, 'video_type': section_params['video_type'], 'title': show['title'], 'year': show['year'], 'slug': slug}
menu_items.append(('Add to Library', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
if TOKEN:
if 'watched' in show and show['watched']:
watched = False
label = 'Mark as Unwatched'
else:
watched = True
label = 'Mark as Watched'
if watched or section_params['section'] == SECTIONS.MOVIES:
queries = {'mode': MODES.TOGGLE_WATCHED, 'section': section_params['section'], 'watched': watched}
queries.update(show_id)
menu_items.append((label, 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
if section_params['section'] == SECTIONS.TV and _SALTS.get_setting('enable-subtitles') == 'true':
queries = {'mode': MODES.EDIT_TVSHOW_ID, 'title': show['title'], 'year': show['year']}
runstring = 'RunPlugin(%s)' % _SALTS.build_plugin_url(queries)
menu_items.append(('Set Addic7ed TVShowID', runstring,))
if section_params['section'] == SECTIONS.TV:
if slug in utils.get_force_title_list():
label = 'Use Default episode matching'
else:
label = 'Use Episode Title matching'
queries = {'mode': MODES.TOGGLE_TITLE, 'slug': slug}
runstring = 'RunPlugin(%s)' % _SALTS.build_plugin_url(queries)
menu_items.append((label, runstring,))
queries = {'mode': MODES.SET_URL_SEARCH, 'video_type': section_params['video_type'], 'title': show['title'], 'year': show['year'], 'slug': slug}
menu_items.append(('Set Related Url (Search)', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
queries = {'mode': MODES.SET_URL_MANUAL, 'video_type': section_params['video_type'], 'title': show['title'], 'year': show['year'], 'slug': slug}
menu_items.append(('Set Related Url (Manual)', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
if len(menu_items) < 10 and 'trailer' in info:
queries = {'mode': MODES.PLAY_TRAILER, 'stream_url': info['trailer']}
menu_items.insert(-3, ('Play Trailer', 'RunPlugin(%s)' % (_SALTS.build_plugin_url(queries))),)
if len(menu_items) < 10:
menu_items.insert(0, ('Show Information', 'XBMC.Action(Info)'),)
liz.addContextMenuItems(menu_items, replaceItems=True)
liz.setProperty('resumetime', str(0))
liz.setProperty('totaltime', str(1))
return liz, liz_url
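# Plugin entry point: parse the plugin url and dispatch the requested mode to its handler.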
def main(argv=None):
if sys.argv: argv = sys.argv
log_utils.log('Version: |%s| Queries: |%s|' % (_SALTS.get_version(), _SALTS.queries))
log_utils.log('Args: |%s|' % (argv))
# don't process params that don't match our url exactly. (e.g. plugin://plugin.video.1channel/extrafanart)
plugin_url = 'plugin://%s/' % (_SALTS.get_id())
if argv[0] != plugin_url:
return
try:
mode = _SALTS.queries.get('mode', None)
url_dispatcher.dispatch(mode, _SALTS.queries)
except TransientTraktError as e:
log_utils.log(str(e), xbmc.LOGERROR)
builtin = 'XBMC.Notification(%s,%s, 5000, %s)'
xbmc.executebuiltin(builtin % (_SALTS.get_name(), str(e), ICON_PATH))
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
a87152f3a008ec4832177a68b36f5aa27bf07c1b | cda43bf6a84f7e55fab26aa70cda934683a51fe5 | /MainMajor/cifar_loader.py | fa597d04eb30e7dc0e3b560acb21758dfb94fd31 | [] | no_license | nikolaosdionelis/NeuralNetworksNNs | abb55622882e31c8d130a8986868b3d19ede186f | 8a217490ad5bb3f7fccf4002c6b43a06c1e562fc | refs/heads/master | 2022-11-13T00:50:23.578197 | 2020-07-12T18:52:20 | 2020-07-12T18:52:20 | 279,042,013 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,034 | py | """
Utilities for downloading and unpacking the CIFAR-10 dataset, originally published
by Krizhevsky et al. and hosted here: https://www.cs.toronto.edu/~kriz/cifar.html
"""
import os
import sys
import tarfile
from six.moves import urllib
import numpy as np
def maybe_download_and_extract(data_dir, url='http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'):
if not os.path.exists(os.path.join(data_dir, 'cifar-10-batches-py')):
if not os.path.exists(data_dir):
os.makedirs(data_dir)
filename = url.split('/')[-1]
filepath = os.path.join(data_dir, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\n>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(data_dir)
def unpickle(file):
fo = open(file, 'rb')
if (sys.version_info >= (3, 0)):
import pickle
d = pickle.load(fo, encoding='latin1')
else:
import cPickle
d = cPickle.load(fo)
fo.close()
return {'x': d['data'].reshape((10000,3,32,32)), 'y': np.array(d['labels']).astype(np.uint8)}
def load_cifar(data_dir="data/cifar_data/"):
if not os.path.exists(data_dir):
print('creating folder', data_dir)
os.makedirs(data_dir)
maybe_download_and_extract(data_dir)
train_data = [unpickle(os.path.join(data_dir,'cifar-10-batches-py','data_batch_' + str(i))) for i in range(1,6)]
skip_first_500 = [0 for x in range(10)]
trainx_list = []
trainy_list = []
valx_list = []
valy_list = []
for row in train_data:
for dx, dy in zip(row['x'], row['y']):
# print(d['y'])
if skip_first_500[dy] < 500:
valx_list.append(dx)
valy_list.append(dy)
skip_first_500[dy] += 1
continue
trainx_list.append(dx)
trainy_list.append(dy)
trainx = np.array(trainx_list)
trainy = np.array(trainy_list)
valx = np.array(valx_list)
valy = np.array(valy_list)
test_data = unpickle(os.path.join(data_dir,'cifar-10-batches-py','test_batch'))
testx = test_data['x']
testy = test_data['y']
trainx = trainx/255.0
valx = valx/255.0
testx = testx/255.0
print("max: " + str(np.amax(trainx)))
print("min: " + str(np.amin(trainx)))
print("max: " + str(np.amax(testx)))
print("min: " + str(np.amin(testx)))
print("max: " + str(np.amax(valx)))
print("min: " + str(np.amin(valx)))
# (N,3,32,32) -> (N,32,32,3)
return np.transpose(trainx, (0,2,3,1)), \
np.transpose(valx, (0,2,3,1)), \
np.transpose(testx, (0,2,3,1))
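# A minimal usage sketch (not part of the original module; the shapes follow from the split
# logic above, where 500 images per class are held out for validation):
#
#   trainx, valx, testx = load_cifar("data/cifar_data/")
#   # trainx: (45000, 32, 32, 3), valx: (5000, 32, 32, 3), testx: (10000, 32, 32, 3)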
| [
"[email protected]"
] | |
96d43f1a35c9c9165c78ba179013dff2edcaf777 | 69dfda87224e60e2e6a6c69a393ff6f62d533049 | /common/metrics.py | 33f260401ceb6c34531f830b1b861ad8ab67f499 | [] | no_license | markmo/federated_trainer | 49d6de3ccff59575bddde6fc38845e2100c32990 | b2a1733a914e3c43d4ab7393fea30515389bf8a4 | refs/heads/master | 2020-05-19T07:10:38.553656 | 2019-05-11T10:46:00 | 2019-05-11T10:46:00 | 184,891,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | import numpy as np
def mean_squared_error(y_pred, y_true):
return np.mean((y_pred - y_true) ** 2)
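# Example (assumes NumPy arrays or array-likes of matching shape):
#   mean_squared_error(np.array([2.0, 3.0]), np.array([1.0, 5.0]))  # ((2-1)**2 + (3-5)**2) / 2 == 2.5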
| [
"[email protected]"
] | |
3ed68fbcf32ccdb5921f710be52a098dc739463d | c46754b9600a12df4f9d7a6320dfc19aa96b1e1d | /examples/research_projects/xtreme-s/run_xtreme_s.py | f00286f3d2b831ad00d7e351354baf7ed9f578c8 | [
"Apache-2.0"
] | permissive | huggingface/transformers | ccd52a0d7c59e5f13205f32fd96f55743ebc8814 | 4fa0aff21ee083d0197a898cdf17ff476fae2ac3 | refs/heads/main | 2023-09-05T19:47:38.981127 | 2023-09-05T19:21:33 | 2023-09-05T19:21:33 | 155,220,641 | 102,193 | 22,284 | Apache-2.0 | 2023-09-14T20:44:49 | 2018-10-29T13:56:00 | Python | UTF-8 | Python | false | false | 38,649 | py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning a 🤗 Transformers pretrained speech model on the XTREME-S benchmark tasks"""
import json
import logging
import os
import re
import sys
from collections import OrderedDict, defaultdict
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union
import datasets
import numpy as np
import torch
from datasets import DatasetDict, load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
AutoModelForCTC,
AutoModelForSpeechSeq2Seq,
AutoProcessor,
AutoTokenizer,
HfArgumentParser,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
Trainer,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.18.0.dev0")
require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
return field(default_factory=lambda: default, metadata=metadata)
TASK_TO_TARGET_COLUMN_NAME = {
"fleurs-asr": "transcription",
"fleurs-lang_id": "lang_id",
"mls": "transcription",
"voxpopuli": "transcription",
"covost2": "translation",
"minds14": "intent_class",
"babel": "transcription",
}
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
tokenizer_name_or_path: Optional[str] = field(
default=None,
metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
)
cache_dir: Optional[str] = field(
default=None,
metadata={
"help": "Where do you want to store the pretrained models and datasets downloaded from huggingface.co"
},
)
freeze_feature_encoder: bool = field(
default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
)
attention_dropout: float = field(
default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
)
feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
hidden_dropout: float = field(
default=0.0,
metadata={
"help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
},
)
final_dropout: float = field(
default=0.0,
metadata={"help": "The dropout probability for the final projection layer."},
)
mask_time_prob: float = field(
default=0.05,
metadata={
"help": (
"Probability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis."
)
},
)
mask_time_length: int = field(
default=10,
metadata={"help": "Length of vector span to mask along the time axis."},
)
mask_feature_prob: float = field(
default=0.0,
metadata={
"help": (
"Probability of each feature vector along the feature axis to be chosen as the start of the vectorspan"
" to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature"
" bins will be masked along the time axis."
)
},
)
mask_feature_length: int = field(
default=10,
metadata={"help": "Length of vector span to mask along the feature axis."},
)
layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
ctc_zero_infinity: bool = field(
default=False,
metadata={"help": "Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`."},
)
ctc_loss_reduction: Optional[str] = field(
default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
dataset_name: str = field(
default="google/xtreme_s",
metadata={"help": "The name of the dataset to use (via the datasets library). Defaults to 'google/xtreme_s'"},
)
task: str = field(
default=None,
metadata={
"help": (
"The task name of the benchmark to use (via the datasets library). Should be on of: "
"'fleurs-asr', 'mls', 'voxpopuli', 'covost2', 'minds14', 'fleurs-lang_id', 'babel'."
)
},
)
language: str = field(
default="all",
metadata={"help": "The language id as defined in the datasets config name or `all` for all languages."},
)
language_group: str = field(
default=None,
metadata={
"help": (
"The language group to select a subset of languages to train on. "
"This option is only used the 'fleurs-asr' task. Should be one of: "
"'western_european_we', 'eastern_european_ee', 'central_asia_middle_north_african_cmn', "
"'sub_saharan_african_ssa', 'south_asian_sa', 'south_east_asian_sea', 'chinese_japanase_korean_cjk'."
)
},
)
train_split_name: str = field(
default="train",
metadata={
"help": "The name of the training dataset split to use (via the datasets library). Defaults to 'train'"
},
)
eval_split_name: str = field(
default="validation",
metadata={
"help": (
"The name of the evaluation dataset split to use (via the datasets library). Defaults to 'validation'"
)
},
)
predict_split_name: str = field(
default="test",
metadata={
"help": "The name of the prediction dataset split to use (via the datasets library). Defaults to 'test'"
},
)
audio_column_name: str = field(
default="audio",
metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
)
target_column_name: str = field(
default=None,
metadata={
"help": (
"The name of the dataset column containing the target data (transcription/translation/label). If None,"
" the name will be inferred from the task. Defaults to None."
)
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
},
)
chars_to_ignore: Optional[List[str]] = list_field(
default=', ? . ! - ; : " “ % ‘ ” �'.split(" "),
metadata={"help": "A list of characters to remove from the transcripts."},
)
max_duration_in_seconds: float = field(
default=30.0,
metadata={
"help": (
"Filter audio files that are longer than `max_duration_in_seconds` seconds to"
" 'max_duration_in_seconds`"
)
},
)
min_duration_in_seconds: float = field(
default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
)
preprocessing_only: bool = field(
default=False,
metadata={
"help": (
"Whether to only do data preprocessing and skip training. This is especially useful when data"
" preprocessing errors out in distributed training due to timeout. In this case, one should run the"
" preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets"
" can consequently be loaded in distributed training"
)
},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"If :obj:`True`, will use the token generated when running"
":obj:`huggingface-cli login` as HTTP bearer authorization for remote files."
)
},
)
unk_token: str = field(
default="[UNK]",
metadata={"help": "The unk token for the tokenizer"},
)
pad_token: str = field(
default="[PAD]",
metadata={"help": "The padding token for the tokenizer"},
)
word_delimiter_token: str = field(
default="|",
metadata={"help": "The word delimiter token for the tokenizer"},
)
phoneme_language: Optional[str] = field(
default=None,
metadata={
"help": (
"The target language that should be used be"
" passed to the tokenizer for tokenization. Note that"
" this is only relevant if the model classifies the"
" input audio to a sequence of phoneme sequences."
)
},
)
per_lang_metrics: bool = field(
default=True,
metadata={
"help": (
"If `True`, compute the test metrics separately for each language, and average the results. "
"If `False` compute the average test metrics in a single pass for all languages at once."
)
},
)
@dataclass
class SpeechDataCollatorWithPadding:
processor: AutoProcessor
decoder_start_token_id: Optional[int] = None
padding: Union[bool, str] = "longest"
pad_labels: Optional[int] = True
pad_to_multiple_of: Optional[int] = None
pad_to_multiple_of_labels: Optional[int] = None
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lengths and need
# different padding methods
input_features = [{"input_values": feature["input_values"]} for feature in features]
batch = self.processor.pad(
input_features,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
if self.pad_labels:
label_features = [{"input_ids": feature["labels"]} for feature in features]
labels_batch = self.processor.pad(
labels=label_features,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of_labels,
return_tensors="pt",
)
# replace padding with -100 to ignore loss correctly
labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
# if bos token is appended in previous tokenization step,
# cut bos token here as it's append later anyways
if (
self.decoder_start_token_id is not None
and (labels[:, 0] == self.decoder_start_token_id).all().cpu().item()
):
labels = labels[:, 1:]
batch["labels"] = labels
else:
batch["labels"] = torch.tensor([feature["labels"] for feature in features])
return batch
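# Rough sketch of what the collator above consumes and produces (feature values are made up):
#
#   features = [
#       {"input_values": [0.1, 0.2, 0.3], "labels": [5, 2]},
#       {"input_values": [0.4, 0.5], "labels": [7]},
#   ]
#   batch = SpeechDataCollatorWithPadding(processor=processor)(features)
#
# Both entries are padded to the longest example in the batch; padded label positions are
# replaced with -100 so the loss ignores them, and for seq2seq models a leading decoder start
# token is stripped from the labels.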
def create_vocabulary_from_data(
datasets: DatasetDict,
word_delimiter_token: Optional[str] = None,
unk_token: Optional[str] = None,
pad_token: Optional[str] = None,
):
# Given training and test labels create vocabulary
def extract_all_chars(batch):
all_text = " ".join(batch["target_text"])
vocab = list(set(all_text))
return {"vocab": [vocab], "all_text": [all_text]}
vocabs = datasets.map(
extract_all_chars,
batched=True,
batch_size=-1,
keep_in_memory=True,
remove_columns=datasets["train"].column_names,
)
# take union of all unique characters in each dataset
vocab_set = (
(set(vocabs["train"]["vocab"][0]) if "train" in vocabs else set())
| (set(vocabs["eval"]["vocab"][0]) if "eval" in vocabs else set())
| (set(vocabs["predict"]["vocab"][0]) if "predict" in vocabs else set())
)
vocab_dict = {v: k for k, v in enumerate(sorted(vocab_set))}
# replace white space with delimiter token
if word_delimiter_token is not None:
vocab_dict[word_delimiter_token] = vocab_dict[" "]
del vocab_dict[" "]
# add unk and pad token
if unk_token is not None:
vocab_dict[unk_token] = len(vocab_dict)
if pad_token is not None:
vocab_dict[pad_token] = len(vocab_dict)
return vocab_dict
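# For intuition: if the "target_text" column only ever contained the characters "a", "b" and
# spaces, the sorted character set would be [" ", "a", "b"], and the helper above would return
# roughly (a sketch, not output from a real run):
#
#   {"|": 0, "a": 1, "b": 2, "[UNK]": 3, "[PAD]": 4}
#
# where the space has been replaced by the word delimiter token and the unk/pad tokens are
# appended at the end.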
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# 1. First, let's load the dataset
raw_datasets = DatasetDict()
task_name = data_args.task
lang_id = data_args.language
if task_name is None:
raise ValueError(
"Set --task should be set to '<xtreme_s_task>' (e.g. 'fleurs-asr', 'mls', 'covost2', 'minds14') "
)
if lang_id is None:
raise ValueError(
"Set --language should be set to the language id of the sub dataset "
"config to be used (e.g. 'pl', 'en.tr', 'fr-FR') or 'all'"
" for multi-lingual fine-tuning."
)
if data_args.language_group is not None:
if data_args.task != "fleurs-asr":
raise ValueError("--language_group should only be used with --task=fleurs-asr")
if data_args.language != "all":
raise ValueError("--language_group should only be used with --language=all")
if data_args.target_column_name is None:
target_column_name = TASK_TO_TARGET_COLUMN_NAME[task_name]
else:
target_column_name = data_args.target_column_name
# here we differentiate between tasks with text as the target and classification tasks
is_text_target = target_column_name in ("transcription", "translation")
config_name = ".".join([task_name.split("-")[0], lang_id])
if training_args.do_train:
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
config_name,
split=data_args.train_split_name,
use_auth_token=data_args.use_auth_token,
cache_dir=model_args.cache_dir,
)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'."
" Make sure to set `--audio_column_name` to the correct audio column - one of"
f" {', '.join(raw_datasets['train'].column_names)}."
)
if target_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--target_column_name {target_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--target_column_name` to the correct text column - one of "
f"{', '.join(raw_datasets['train'].column_names)}."
)
if data_args.max_train_samples is not None:
raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
if training_args.do_eval:
raw_datasets["eval"] = load_dataset(
data_args.dataset_name,
config_name,
split=data_args.eval_split_name,
use_auth_token=data_args.use_auth_token,
cache_dir=model_args.cache_dir,
)
if data_args.max_eval_samples is not None:
raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
if training_args.do_predict:
raw_datasets["predict"] = load_dataset(
data_args.dataset_name,
config_name,
split=data_args.predict_split_name,
use_auth_token=data_args.use_auth_token,
cache_dir=model_args.cache_dir,
)
if data_args.max_predict_samples is not None:
raw_datasets["predict"] = raw_datasets["predict"].select(range(data_args.max_predict_samples))
lang_list = next(iter(raw_datasets.values())).features["lang_id"].names
if not is_text_target:
label_list = next(iter(raw_datasets.values())).features[target_column_name].names
num_labels = len(label_list)
num_workers = data_args.preprocessing_num_workers
lang_group = data_args.language_group
if lang_group is not None:
with training_args.main_process_first(desc="language group filter"):
lang_group_id = next(iter(raw_datasets.values())).features["lang_group_id"].str2int(lang_group)
raw_datasets = raw_datasets.filter(
lambda lang_group: lang_group == lang_group_id,
num_proc=num_workers,
input_columns=["lang_group_id"],
)
# 2. We remove some special characters from the datasets
# that make training complicated and do not help in transcribing the speech
# E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
# that could be easily picked up by the model
chars_to_ignore_regex = (
f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
)
def remove_special_characters(batch):
if chars_to_ignore_regex is not None:
batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[target_column_name]).lower() + " "
else:
batch["target_text"] = batch[target_column_name].lower() + " "
return batch
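    # Quick illustration with the default chars_to_ignore list (hypothetical sample row): a
    # transcription of "Hello, world!" becomes {"target_text": "hello world "} -- the listed
    # punctuation is stripped, the text is lower-cased, and a trailing space is appended.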
if is_text_target:
with training_args.main_process_first(desc="dataset map special characters removal"):
raw_datasets = raw_datasets.map(
remove_special_characters,
remove_columns=[target_column_name],
desc="remove special characters from datasets",
)
# save special tokens for tokenizer
word_delimiter_token = data_args.word_delimiter_token
unk_token = data_args.unk_token
pad_token = data_args.pad_token
# 3. Next, let's load the config as we might need it to create
# the tokenizer
config = AutoConfig.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
)
if is_text_target:
# 4. (Optional, for ASR and translation) If no tokenizer file is defined,
# we create the vocabulary of the model by extracting all unique characters from
# the training and evaluation datasets
# We need to make sure that only first rank saves vocabulary
# make sure all processes wait until vocab is created
tokenizer_name_or_path = model_args.tokenizer_name_or_path
tokenizer_kwargs = {}
if tokenizer_name_or_path is None:
# save vocab in training output dir
tokenizer_name_or_path = training_args.output_dir
vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
with training_args.main_process_first():
if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
os.remove(vocab_file)
with training_args.main_process_first(desc="dataset map vocabulary creation"):
if not os.path.isfile(vocab_file):
os.makedirs(tokenizer_name_or_path, exist_ok=True)
vocab_dict = create_vocabulary_from_data(
raw_datasets,
word_delimiter_token=word_delimiter_token,
unk_token=unk_token,
pad_token=pad_token,
)
# save vocab dict to be loaded into tokenizer
with open(vocab_file, "w") as file:
json.dump(vocab_dict, file)
# if tokenizer has just been created
# it is defined by `tokenizer_class` if present in config else by `model_type`
if not config.is_encoder_decoder:
tokenizer_kwargs = {
"config": config if config.tokenizer_class is not None else None,
"tokenizer_type": config.model_type if config.tokenizer_class is None else None,
"unk_token": unk_token,
"pad_token": pad_token,
"word_delimiter_token": word_delimiter_token,
}
else:
tokenizer_kwargs = {}
# 5. Now we can instantiate the feature extractor, tokenizer and model
# Note for distributed training, the .from_pretrained methods guarantee that only
# one local process can concurrently download model & vocab.
# load feature_extractor and tokenizer
if is_text_target:
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_name_or_path,
use_auth_token=data_args.use_auth_token,
**tokenizer_kwargs,
)
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
)
# adapt config
# (speech translation requires pre-configured seq2seq models)
if task_name != "covost2":
config.update(
{
"feat_proj_dropout": model_args.feat_proj_dropout,
"attention_dropout": model_args.attention_dropout,
"hidden_dropout": model_args.hidden_dropout,
"final_dropout": model_args.final_dropout,
"mask_time_prob": model_args.mask_time_prob,
"mask_time_length": model_args.mask_time_length,
"mask_feature_prob": model_args.mask_feature_prob,
"mask_feature_length": model_args.mask_feature_length,
"gradient_checkpointing": training_args.gradient_checkpointing,
"layerdrop": model_args.layerdrop,
"ctc_zero_infinity": model_args.ctc_zero_infinity,
"ctc_loss_reduction": model_args.ctc_loss_reduction,
"activation_dropout": model_args.activation_dropout,
}
)
if training_args.do_train:
if is_text_target:
config.pad_token_id = tokenizer.pad_token_id
config.vocab_size = len(tokenizer)
else:
label_to_id = {v: i for i, v in enumerate(label_list)}
config.label2id = label_to_id
config.id2label = {id: label for label, id in label_to_id.items()}
config.num_labels = num_labels
# create model
if target_column_name == "transcription":
model = AutoModelForCTC.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
config=config,
use_auth_token=data_args.use_auth_token,
)
elif config.is_encoder_decoder:
model = AutoModelForSpeechSeq2Seq.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
config=config,
use_auth_token=data_args.use_auth_token,
)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
else:
model = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
config=config,
use_auth_token=data_args.use_auth_token,
)
# freeze encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
# 6. Now we preprocess the datasets including loading the audio, resampling and normalization
# Thankfully, `datasets` takes care of automatically loading and resampling the audio,
# so that we just need to set the correct target sampling rate and normalize the input
# via the `feature_extractor`
# make sure that dataset decodes audio with correct sampling rate
dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
if dataset_sampling_rate != feature_extractor.sampling_rate:
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
)
# derive max & min input length for sample rate & max duration
max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
audio_column_name = data_args.audio_column_name
# `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
phoneme_language = data_args.phoneme_language
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def prepare_dataset(batch):
# load audio
sample = batch[audio_column_name]
inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
batch["input_values"] = inputs.input_values[0]
batch["length"] = len(batch["input_values"])
# encode targets
additional_kwargs = {}
if phoneme_language is not None:
additional_kwargs["phonemizer_lang"] = phoneme_language
if is_text_target:
batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
else:
batch["labels"] = batch[target_column_name]
batch["lang"] = batch["lang_id"]
return batch
with training_args.main_process_first(desc="dataset map preprocessing"):
vectorized_datasets = raw_datasets.map(
prepare_dataset,
remove_columns=next(iter(raw_datasets.values())).column_names,
num_proc=num_workers,
desc="preprocess datasets",
)
if training_args.do_train:
def is_audio_in_length_range(length):
return length > min_input_length and length < max_input_length
# filter data that is shorter than min_input_length
vectorized_datasets["train"] = vectorized_datasets["train"].filter(
is_audio_in_length_range,
num_proc=num_workers,
input_columns=["length"],
)
# 7. Next, we can prepare for the training step.
# Let's use the appropriate XTREME-S evaluation metric,
# instantiate a data collator and the trainer
# Define evaluation metrics during training, *i.e.* word error rate, character error rate
eval_metric = load_metric("xtreme_s", task_name)
# for large datasets it is advised to run the preprocessing on a
# single machine first with ``args.preprocessing_only`` since there will mostly likely
# be a timeout when running the script in distributed mode.
# In a second step ``args.preprocessing_only`` can then be set to `False` to load the
# cached dataset
if data_args.preprocessing_only:
logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
return
def asr_logits_argmax(logits, labels):
return logits.argmax(dim=-1)
def compute_asr_metric(pred):
pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
pred_str = tokenizer.batch_decode(pred.predictions)
# we do not want to group tokens when computing the metrics
label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
metric = eval_metric.compute(predictions=pred_str, references=label_str)
return metric
def compute_classification_metric(pred):
pred_ids = np.argmax(pred.predictions, axis=1)
metric = eval_metric.compute(predictions=pred_ids, references=pred.label_ids)
return metric
# Now save everything to be able to create a single processor later
if is_main_process(training_args.local_rank):
# save feature extractor, tokenizer and config
feature_extractor.save_pretrained(training_args.output_dir)
if is_text_target:
tokenizer.save_pretrained(training_args.output_dir)
config.save_pretrained(training_args.output_dir)
# wait until configs are saved in the main process before loading the processor
if training_args.local_rank != -1:
torch.distributed.barrier()
if is_text_target:
processor = AutoProcessor.from_pretrained(training_args.output_dir)
else:
processor = AutoFeatureExtractor.from_pretrained(training_args.output_dir)
# Instantiate custom data collator
data_collator = SpeechDataCollatorWithPadding(processor=processor, pad_labels=is_text_target)
# Initialize Trainer
if target_column_name == "translation":
trainer = Seq2SeqTrainer(
model=model,
data_collator=data_collator,
args=training_args,
preprocess_logits_for_metrics=asr_logits_argmax if training_args.predict_with_generate else None,
compute_metrics=compute_asr_metric if training_args.predict_with_generate else None,
train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
tokenizer=feature_extractor,
)
else:
trainer = Trainer(
model=model,
data_collator=data_collator,
args=training_args,
preprocess_logits_for_metrics=asr_logits_argmax if is_text_target else None,
compute_metrics=compute_asr_metric if is_text_target else compute_classification_metric,
train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
tokenizer=feature_extractor,
)
# 8. Finally, we can start training
# Training
if training_args.do_train:
# use last checkpoint if exist
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples
if data_args.max_train_samples is not None
else len(vectorized_datasets["train"])
)
metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation on the test set
results = {}
if training_args.do_predict:
logger.info(f"*** Evaluating on the `{data_args.predict_split_name}` set ***")
if data_args.per_lang_metrics:
# separate the `test` dataset into language-specific subsets and compute metrics for each of them
metrics = {}
average_metrics = defaultdict(list)
for lang_id in range(len(lang_list)):
lang_name = lang_list[lang_id]
with training_args.main_process_first(desc="per-language dataset filter"):
lang_dataset = vectorized_datasets["predict"].filter(
lambda lang: lang == lang_id,
num_proc=num_workers,
input_columns=["lang"],
)
lang_metrics = trainer.evaluate(lang_dataset)
redundant_metrics = ["eval_runtime", "eval_samples_per_second", "eval_steps_per_second", "eval_epoch"]
for metric_name, value in lang_metrics.items():
average_metrics[metric_name].append(value)
if metric_name not in redundant_metrics:
metrics[f"{metric_name}_{lang_name}"] = value
for metric_name, value in average_metrics.items():
metrics[metric_name] = np.mean(value)
else:
metrics = trainer.evaluate(vectorized_datasets["predict"])
max_predict_samples = (
data_args.max_predict_samples
if data_args.max_predict_samples is not None
else len(vectorized_datasets["predict"])
)
metrics["predict_samples"] = min(max_predict_samples, len(vectorized_datasets["predict"]))
# make sure that the `predict` metrics end up in the log history for the model card
trainer.log(OrderedDict(sorted(metrics.items())))
trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics)
# Write model card and (optionally) push to hub
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": task_name,
"tags": [task_name, data_args.dataset_name],
"dataset_args": (
f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split:"
f" {data_args.eval_split_name}, Predict split: {data_args.predict_split_name}"
),
"dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
"language": data_args.language,
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
return results
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c377555c2f0b5965254d6b7ac73814194212cf28 | 2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5 | /archive/298BinaryTreeLongestConsecutiveSequence.py | 92d9aa4d93f3055c85599dd48eacc42d05e24958 | [] | no_license | doraemon1293/Leetcode | 924b19f840085a80a9e8c0092d340b69aba7a764 | 48ba21799f63225c104f649c3871444a29ab978a | refs/heads/master | 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,411 | py | # coding=utf-8
'''
Created on 2017-06-16
@author: Administrator
'''
from data_structure.Tree import list_to_tree
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def longestConsecutive(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root == None: return 0
self.ans = 0
self.memo = {}
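        # Post-order DFS with memoization: for each node, compute the length of the longest
        # consecutive path (each child exactly one greater than its parent) starting at that
        # node, cache it per node, and track the global maximum in self.ans.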
def foo(node):
res = 1
if node.left:
                left = self.memo[node.left] if node.left in self.memo else foo(node.left)
else:
left = 0
if node.right:
                right = self.memo[node.right] if node.right in self.memo else foo(node.right)
else:
right = 0
if left and node.left.val == node.val + 1:
res = max(res, left + 1)
if right and node.right.val == node.val + 1:
res = max(res, right + 1)
self.ans = max(self.ans, res)
self.memo[node] = res
return res
foo(root)
# for k, v in self.memo.items():
# print k.val, v
# print self.memo
return self.ans
root = list_to_tree([1, None, 3, 2, 4, None, None, None, 5])
print Solution().longestConsecutive(root)
| [
"[email protected]"
] | |
31179b3b337c588adca95e89088525bd446ce1e8 | b01182728eb4a657f9bd8974ba8508010cc8173e | /manage.py | 314bde693826fb496c2d2cd9215db691ca17844c | [
"BSD-3-Clause"
] | permissive | pombredanne/django-buildbot | 459a6d8a942c8c5f857626f4772010a3aaba2a6e | 660ec35473aec081428a115eab6460f5a1cd2a0d | refs/heads/master | 2021-01-17T19:57:01.296286 | 2014-07-29T06:25:03 | 2014-07-29T06:25:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djbuildbot.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
36c7630adaee0011f298f2b5daaa682935d10da2 | 76b983258793d294b81791ebe72591bfebf78625 | /tools/pesearch.py | d6b57f8548ff6441b7acce963e476872846cc4ae | [
"BSD-2-Clause"
] | permissive | lotusexpeditor/syringe | 18ac9cb800a7fefb7d67e31936db6a84e47df9eb | 34a8386b90f534f9a856d0a436bba04dbf5100bd | refs/heads/master | 2023-02-08T10:08:20.295797 | 2020-12-27T00:06:09 | 2020-12-27T00:06:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,743 | py | import os,sys
import pecoff
from ptypes import utils
def traverse_address(address):
def edges(self, address=address, **kwargs):
if not isinstance(self, ptypes.ptype.container):
return
for item in self.value:
if item.contains(address):
yield item
continue
return
return edges
def rightjustify(string, size, space=' '):
# i don't know if python has support for right justification with character
# filling. their fmtstring doesn't seem to..
diff = size - len(string)
if diff > 0:
string = space*diff + string
return string
def processexecutable(filename, address):
# globals for interpretive use
global mz,pe,imagebase,sections,datadirectory
print('Query: %x\n'% address)
print('Module: %s'% os.path.basename(filename))
print('ImageBase: %x'% imagebase)
# try exe header first
mz.setoffset(imagebase, recurse=True)
if mz.contains(address):
result = mz
for item in mz.traverse(traverse_address(address)):
x = item.__name__
print(rightjustify('------- %s'%x, 70, '-'))
result = result[int(x) if isinstance(result, ptypes.parray.type) else x]
print(result)
# try sections
else:
mz.setoffset(0,recurse=True)
va = address - imagebase
s = pe['Sections'].getsectionbyaddress(va)
offset = va - s['VirtualAddress'].int()
data = s['PointerToRawData'].d.load().serialize()
left = offset - 8
left &= ~0xf
right = left+0x30
if left < 0: left = 0
if right > len(data): right = len(data)
sectionname = s['Name'].get()
print(rightjustify(' section %s'% sectionname, 76, '-'))
print(utils.hexdump(data[left:right], offset=s['VirtualAddress'].int()+offset+imagebase))
mz.setoffset(0, recurse=True)
return
from ptypes import ptype
def dumpcontainer(pc, indent=''):
if isinstance(pc.value, list):
for p in pc.value:
a = p.getoffset()
range ='%x-%x'% (a, a+p.size())
sym = '%s -> %s'%( p.__name__, p.__class__.__name__)
r = repr(p.serialize())
if not isinstance(p.value, list):
print(indent, range, sym, ' | ', r)
continue
print(indent, range, sy)
dumpcontainer(p, indent+' ')
pass
return
def dumpexecutable(filename):
# globals for interpretive use
global mz,pe,imagebase,sections,datadirectory,imports
print('Module: %s'% os.path.basename(filename))
print('ImageBase: %x'% imagebase)
print('Imports: %s'% ', '.join([x['Name'].d.l.str() for x in imports.l[:-1]]))
mz.setoffset(imagebase,recurse=True)
print(pe)
for x in sections:
name = x['Name'].str()
address = x['VirtualAddress'].int() + imagebase
print(x['Name'].str(), hex(address), hex(address + x.getloadedsize()))
mz.setoffset(0,recurse=True)
return
def dumpversion(filename):
global mz,pe,imagebase,sections,datadirectory,imports
opt = pe['OptionalHeader']
print('OperatingSystem', float('%d.%d'%(opt['MajorOperatingSystemVersion'].int(), opt['MinorOperatingSystemVersion'].int())))
print('ImageVersion', float('%d.%d'%(opt['MajorImageVersion'].int(), opt['MinorImageVersion'].int())))
print('SubsystemVersion', float('%d.%d'%(opt['MajorSubsystemVersion'].int(), opt['MinorSubsystemVersion'].int())))
print(opt['Win32VersionValue'])
global root
rsrc = datadirectory[2]
root = rsrc['VirtualAddress'].d.l
global a,b
a,b = root['Ids']
if __name__ == '__main__':
import sys, ptypes
zerobase = False
if '-z' in sys.argv:
i = sys.argv.index('-z')
sys.argv.pop(i)
zerobase = True
try:
filename = sys.argv[1]
except:
print('Usage: %s [-z] filename [hexaddress]'% sys.argv[0])
sys.exit(0)
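    # Hypothetical invocations (paths and addresses below are made up):
    #   python pesearch.py C:\Windows\notepad.exe           -> dump headers, sections and imports
    #   python pesearch.py C:\Windows\notepad.exe 1001000   -> show what lives at virtual address 0x1001000
    #   python pesearch.py -z sample.dll 1000                -> same query, but with the image base treated as zero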
if not os.path.exists(filename):
raise OSError("The specified file ({:s}) does not exist.".format(filename))
source = ptypes.provider.file(filename)
mz = pecoff.Executable.File(source=source).l
pe = mz['Next']['Header']
sections = pe['Sections']
imagebase = pe['OptionalHeader']['ImageBase'].int()
datadirectory = pe['DataDirectory']
if zerobase:
imagebase = 0
imports = datadirectory[1]['Address'].d.l
if len(sys.argv) == 2:
dumpexecutable(filename)
elif len(sys.argv) == 3:
address = int(sys.argv[2], 16)
try:
processexecutable(filename, address)
except KeyError:
print('address %x not found in %s'% (address, filename))
pass
| [
"[email protected]"
] | |
e38298b3db6bf24895dd9139bc51769fbef8cd78 | 37f10a692d0e1a9a396f505af60b04f7db44d3e1 | /01 Algorithms/01 Warmup/CompareTheTriplets.py | 9b18b2dcebec59ded6a89ae7c2987fd725d10ec4 | [] | no_license | nbrahman/HackerRank | 95842f5dbda2ab8aedc7808831c12b9f92a37e03 | ec5d42d7e578f01efba87a099f42e82512704dca | refs/heads/master | 2021-01-19T11:53:59.685444 | 2017-07-29T16:04:21 | 2017-07-29T16:04:21 | 88,003,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | '''
Alice and Bob each created one problem for HackerRank. A reviewer rates the two challenges,
awarding points in three categories: problem clarity, originality, and difficulty.
We define the rating for Alice's challenge to be the triplet a = (a[0], a[1], a[2]), and the
rating for Bob's challenge to be the triplet b = (b[0], b[1], b[2]).
Your task is to find their comparison scores by comparing a[0] with b[0], a[1] with b[1], and a[2] with b[2].
If a[i] > b[i], then Alice is awarded 1 point.
If a[i] < b[i], then Bob is awarded 1 point.
If a[i] == b[i], then neither person receives a point.
Given a and b, compare the two challenges and print their respective comparison points.
Input Format
The first line contains three space-separated integers a[0], a[1], and a[2], the values in triplet a.
The second line contains three space-separated integers b[0], b[1], and b[2], the values in triplet b.
Constraints
Output Format
Print two space-separated integers denoting the respective comparison scores earned by Alice and Bob.
Sample Input
5 6 7
3 6 10
Sample Output
1 1
Explanation
In this example a = (5, 6, 7) and b = (3, 6, 10). Comparing each pair of scores:
a[0] > b[0] (5 > 3), so Alice receives 1 point.
a[1] == b[1] (6 == 6), so nobody receives a point.
a[2] < b[2] (7 < 10), so Bob receives 1 point.
Alice's comparison score is 1, and Bob's comparison score is 1. Thus, we print 1 1 (Alice's comparison score followed by Bob's comparison score) on a single line.
'''
if __name__ == '__main__':
A = list(map(int, input().strip().split()))
B = list(map(int, input().strip().split()))
Ascore = sum([1 for a, b in zip(A, B) if a > b])
Bscore = sum([1 for a, b in zip(A, B) if b > a])
print(Ascore, Bscore) | [
"[email protected]"
] | |
8064b3ab94bba0206c0df6abb5ec6f15205ccb42 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2023_06_01_preview/operations/_registry_environment_containers_operations.py | 3d50fb87236f18b2d65c3947e9abaf674c8455ba | [
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 29,392 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
subscription_id, # type: str
resource_group_name, # type: str
registry_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2023-06-01-preview") # type: str
skip = kwargs.pop('skip', None) # type: Optional[str]
list_view_type = kwargs.pop('list_view_type', None) # type: Optional[Union[str, "_models.ListViewType"]]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{2,32}$'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if skip is not None:
_query_parameters['$skip'] = _SERIALIZER.query("skip", skip, 'str')
if list_view_type is not None:
_query_parameters['listViewType'] = _SERIALIZER.query("list_view_type", list_view_type, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_delete_request_initial(
subscription_id, # type: str
resource_group_name, # type: str
registry_name, # type: str
environment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2023-06-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{2,32}$'),
"environmentName": _SERIALIZER.url("environment_name", environment_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_get_request(
subscription_id, # type: str
resource_group_name, # type: str
registry_name, # type: str
environment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2023-06-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{2,32}$'),
"environmentName": _SERIALIZER.url("environment_name", environment_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
subscription_id, # type: str
resource_group_name, # type: str
registry_name, # type: str
environment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2023-06-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"registryName": _SERIALIZER.url("registry_name", registry_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{2,32}$'),
"environmentName": _SERIALIZER.url("environment_name", environment_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
# fmt: on
class RegistryEnvironmentContainersOperations(object):
"""RegistryEnvironmentContainersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name, # type: str
registry_name, # type: str
skip=None, # type: Optional[str]
list_view_type=None, # type: Optional[Union[str, "_models.ListViewType"]]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.EnvironmentContainerResourceArmPaginatedResult"]
"""List environment containers.
List environment containers.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
:type registry_name: str
:param skip: Continuation token for pagination.
:type skip: str
:param list_view_type: View type for including/excluding (for example) archived entities.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentContainerResourceArmPaginatedResult or
the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.EnvironmentContainerResourceArmPaginatedResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2023-06-01-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.EnvironmentContainerResourceArmPaginatedResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
skip=skip,
list_view_type=list_view_type,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
skip=skip,
list_view_type=list_view_type,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("EnvironmentContainerResourceArmPaginatedResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments"} # type: ignore
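    # Note on paging (descriptive comment): the ItemPaged returned by list()
    # drives get_next/extract_data lazily -- the first HTTP request is only sent
    # when iteration starts, and extract_data follows deserialized.next_link until
    # the service stops returning one. The optional `skip` argument carries the
    # service-side continuation token for resuming an interrupted listing.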
def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name, # type: str
registry_name, # type: str
environment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2023-06-01-preview") # type: str
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers['x-ms-async-operation-timeout']=self._deserialize('duration', response.headers.get('x-ms-async-operation-timeout'))
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"} # type: ignore
@distributed_trace
def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name, # type: str
registry_name, # type: str
environment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Delete container.
Delete container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
:type registry_name: str
:param environment_name: Container name.
:type environment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2023-06-01-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"} # type: ignore
@distributed_trace
def get(
self,
resource_group_name, # type: str
registry_name, # type: str
environment_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.EnvironmentContainer"
"""Get container.
Get container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
:type registry_name: str
:param environment_name: Container name. This is case-sensitive.
:type environment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EnvironmentContainer, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EnvironmentContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2023-06-01-preview") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('EnvironmentContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
registry_name, # type: str
environment_name, # type: str
body, # type: "_models.EnvironmentContainer"
**kwargs # type: Any
):
# type: (...) -> "_models.EnvironmentContainer"
cls = kwargs.pop('cls', None) # type: ClsType["_models.EnvironmentContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2023-06-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'EnvironmentContainer')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('EnvironmentContainer', pipeline_response)
if response.status_code == 201:
response_headers['x-ms-async-operation-timeout']=self._deserialize('duration', response.headers.get('x-ms-async-operation-timeout'))
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('EnvironmentContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name, # type: str
registry_name, # type: str
environment_name, # type: str
body, # type: "_models.EnvironmentContainer"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.EnvironmentContainer"]
"""Create or update container.
Create or update container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry. This is case-insensitive.
:type registry_name: str
:param environment_name: Container name.
:type environment_name: str
:param body: Container entity to create or update.
:type body: ~azure.mgmt.machinelearningservices.models.EnvironmentContainer
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either EnvironmentContainer or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.machinelearningservices.models.EnvironmentContainer]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2023-06-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.EnvironmentContainer"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
environment_name=environment_name,
body=body,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('EnvironmentContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'original-uri'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/environments/{environmentName}"} # type: ignore
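# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the generated module).
# The management client class name and the operation-group attribute used below
# are assumptions based on the usual azure-mgmt-machinelearningservices layout;
# check the installed package for the exact names.
def _example_registry_environment_containers_usage():  # pragma: no cover
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(), subscription_id="<subscription-id>"
    )
    ops = client.registry_environment_containers
    # list() returns an ItemPaged iterator; page requests are issued lazily.
    for container in ops.list("my-resource-group", "my-registry"):
        print(container)
    # get() returns a deserialized EnvironmentContainer model.
    print(ops.get("my-resource-group", "my-registry", "my-environment"))
    # begin_delete() is a long-running operation and returns an LROPoller.
    ops.begin_delete("my-resource-group", "my-registry", "my-environment").wait()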
| [
"[email protected]"
] | |
7f7e6202d1b1aa73181f18176a9a42fd131c5023 | 9cef4ef20efd0eec18846242e78be0b9be144c30 | /homework/20200717/子进程中修改全局变量.py | 9a7989b2eec30662def98571d1867a21fc6cbba9 | [] | no_license | Vaild/python-learn | 4e6511a62a40b6104b081e0f8fe30f7d829901f5 | 5d602daf3b4b7e42349b7d9251df1f4dd62c299c | refs/heads/master | 2022-11-19T00:47:48.808384 | 2020-07-20T14:27:49 | 2020-07-20T14:27:49 | 279,044,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | #!/usr/bin/python3
# coding = UTF-8
# code by va1id
# Define a global variable
import os
import time
from multiprocessing import Process
N = [1, 2]
def change_process():
    print('List before modification:', N)
    print('Process ID at this point:', os.getpid())
    for i in range(10):
        N.append(i)
        print('List after modification:', N)
        print('Process ID after modification:', os.getpid())
        time.sleep(1)
def watch_list():
    print('The list is:', N)
    time.sleep(1)
if __name__ == '__main__':
    x = Process(target=change_process)
    y = Process(target=watch_list)
    x.start()
    y.start()
    x.join()
    y.join()
    print('Final list:', N)
    print('Final process ID:', os.getpid())
# Trying the same with an integer: the ID printed at the end turns out to change
N = 1
def change_process():
    global N
    print('Value before modification:', N)
    print('Process ID at this point:', os.getpid())
    for i in range(10):
        N = i
        print('Value after modification:', N)
        print('Process ID after modification:', os.getpid())
        time.sleep(1)
def watch_list():
    print('The value is:', N)
    time.sleep(1)
if __name__ == '__main__':
    x = Process(target=change_process)
    y = Process(target=watch_list)
    x.start()
    y.start()
    x.join()
    y.join()
    print('Final value:', N)
    print('Final process ID:', os.getpid())
| [
"[email protected]"
] | |
91bebb09bc6024dfa9e4392aaab0a55020f063b4 | d7f33fd19ecc59aadebe4f50db605ad39b967dbd | /disambig_creation.py | 36a957f12832a0936243132decf3d61f9165915d | [] | no_license | juansaenz/leaguepedia_util | 9687ab3634ccdf022c637323dceb0638ed5ece88 | 77e3dfe501b333b3a8dc2a04f207b8bf0858dbc3 | refs/heads/master | 2020-08-05T19:01:30.131055 | 2019-10-04T11:34:01 | 2019-10-04T11:34:01 | 212,667,697 | 0 | 0 | null | 2019-10-03T19:54:58 | 2019-10-03T19:54:58 | null | UTF-8 | Python | false | false | 7,129 | py | import re, threading, mwparserfromhell
from log_into_wiki import *
#################################################################################################
original_name = 'Starky'
irl_name = "Juan Carlos Cano"
new_name = '{} ({})'.format(original_name, irl_name.strip())
init_move = True
blank_edit = False
limit = -1
timeout_limit = 30
listplayer_templates = ["listplayer", "listplayer/Current"]
roster_templates = ["ExtendedRosterLine", "ExtendedRosterLine/MultipleRoles"]
scoreboard_templates = ["MatchRecapS8/Player","Scoreboard/Player"]
stat_templates = ["IPS", "CareerPlayerStats", "MatchHistoryPlayer"]
player_line_templates = ["LCKPlayerLine", "LCSPlayerLine"]
roster_change_templates = ["RosterChangeLine", "RosterRumorLine2",
"RosterRumorLineStay", "RosterRumorLineNot", "RosterRumorLine"]
summary = "Disambiguating {} to {}".format(original_name, new_name)
css_style = " {\n color:orange!important;\n font-weight:bold;\n}"
orig_name_lc = original_name[0].lower() + original_name[1:]
new_name_lc = new_name[0].lower() + new_name[1:]
blank_edit_these = []
#############################################################################################
def savepage(targetpage, savetext):
targetpage.save(savetext, summary=summary, tags="bot_disambig")
def blank_edit_page(page):
textname = str(page.name)
newpage = site.pages[textname]
text = newpage.text(cache=False)
page.save(text, summary="Blank Editing")
def move_page(from_page):
new_page_name = str(from_page.name).replace(original_name, new_name)
new_page = site.pages[new_page_name]
if new_page.exists:
print("{} already exists, cannot move!".format(from_page.name))
else:
print("Moving page {} to {}".format(from_page.name, new_page_name))
from_page.move(new_page_name, reason=summary, no_redirect=True)
blank_edit_these.append(new_page)
def edit_concept(concept):
text = concept.text()
wikitext = mwparserfromhell.parse(text)
for template in wikitext.filter_templates():
if template.name.matches("PlayerGamesConcept"):
i = 1
while template.has(i):
if template.get(i).strip() == original_name:
template.add(i, new_name)
elif template.get(i).strip() == orig_name_lc:
template.add(i, new_name_lc)
i = i + 1
newtext = str(wikitext)
if newtext != text:
concept.save(newtext, summary=summary, tags="bot_disambig")
def edit_subpage(subpage):
text = subpage.text()
wikitext = mwparserfromhell.parse(text)
for stemplate in wikitext.filter_templates():
if stemplate.has(1):
if stemplate.get(1).value.strip() == original_name:
stemplate.add(1, new_name)
newtext = str(wikitext)
if text != newtext:
print("Editing " + subpage.name + "...")
subpage.save(newtext, reason=summary)
def process_page(page):
print("Processing next page: " + page.name)
text = page.text()
origtext = text
# do links first because it's easier to just edit them as a string
if text.lower().startswith('#redirect') and page.name.lower() == original_name.lower():
pass
else:
text = text.replace("[[" + original_name + "]]", "[[" + new_name + "|" + original_name + "]]")
wikitext = mwparserfromhell.parse(text)
for template in wikitext.filter_templates():
process_template(template)
newtext = str(wikitext)
if origtext != newtext or blank_edit:
print("Saving...")
t = threading.Thread(target=savepage, kwargs={"targetpage": page, "savetext": newtext})
t.start()
t.join(timeout=timeout_limit)
else:
print("No changes, skipping")
def check_list(template, param, sep = ','):
if not template.has(param):
return
text_initial = template.get(param).value.strip()
tbl = text_initial.split(sep)
made_changes = False
for i, val in enumerate(tbl):
if val.strip() == original_name:
made_changes = True
tbl[i] = new_name
if made_changes:
template.add(param, sep.join(tbl))
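def check_links(template, names_param, links_param, sep, orig_name, target_name):
    '''Best-guess reconstruction (an assumption, not the original author's code):
    the script calls check_links() below but never defines it. The intent appears
    to be: when an entry of |names_param= matches the player being disambiguated,
    point the matching position of |links_param= at the new page name.'''
    if not template.has(names_param):
        return
    names = template.get(names_param).value.strip().split(sep)
    if template.has(links_param):
        links = template.get(links_param).value.strip().split(sep)
    else:
        links = ['' for _ in names]
    while len(links) < len(names):
        links.append('')
    made_changes = False
    for i, name in enumerate(names):
        if name.strip() == orig_name:
            links[i] = target_name
            made_changes = True
    if made_changes:
        template.add(links_param, sep.join(links))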
def process_template(template):
def tl_matches(arr, field=None):
if field:
has_field = False
if template.has(field):
has_field = template.get(field).value.strip() == original_name
return [_ for _ in arr if template.name.matches(_)] and has_field
return [_ for _ in arr if template.name.matches(_)]
if tl_matches(['bl'], field=1) and not template.has(2):
template.add(1, new_name)
template.add(2, original_name)
elif tl_matches(listplayer_templates, field=1) and not template.has("link"):
template.add("link", new_name, before=1)
elif tl_matches(roster_templates, field='player') and not template.has('link'):
template.add("link", new_name, before="name")
elif tl_matches(scoreboard_templates, field='name'):
template.add("link", new_name, before="kills")
elif tl_matches(roster_change_templates, field='player'):
template.add("player", new_name + "{{!}}" + original_name)
elif tl_matches(['TeamRoster/Line', 'RosterLineOld'], field='player'):
template.add('link', new_name)
elif tl_matches(player_line_templates, field=1):
template.add(2, new_name)
elif tl_matches(['Player', 'RSRR/Player'], field=1):
template.add('link', new_name)
elif tl_matches(["MatchDetails/Series"], field='mvp'):
template.add("mvplink", new_name, before="mvp")
elif tl_matches(["PentakillLine"], field=6):
template.add("playerlink", new_name, before=6)
elif tl_matches(["MatchSchedule","MatchSchedule/Game"]):
if template.has("mvp"):
if template.get("mvp").value.strip() == original_name:
template.add("mvp", new_name)
check_list(template, 'with')
check_list(template, 'pbp')
check_list(template, 'color')
elif tl_matches(['ExternalContent/Line']):
check_list(template, 'players')
elif tl_matches(['SeasonAward']):
if template.has(1):
if template.get(1).value.strip() == original_name:
template.add('link', new_name)
check_links(template, 'eligibleplayers', 'eligiblelinks', ',', original_name, new_name)
elif tl_matches(['PlayerImageMetadata'], field="playerlink"):
template.add('playerlink', new_name)
elif tl_matches(["PortalCurrentRosters"]):
for pos in ['t', 'j', 'm', 'a', 's']:
for period in ['old', 'new']:
arg_name = pos + '_' + period
arg_link = arg_name + '_links'
check_links(template, arg_name, arg_link, ',', original_name, new_name)
def make_disambig_page():
text = "{{DisambigPage\n|player1=" + new_name + "\n|player2=\n}}"
page = site.pages[original_name]
old_text = page.text()
if 'disambigpage' not in old_text.lower():
page.save(text, summary=summary)
site = login('me','lol')
thispage = site.pages[original_name]
newpage = site.pages[new_name]
if init_move:
move_page(thispage)
subpages = site.allpages(prefix=original_name + "/")
for subpage in subpages:
edit_subpage(subpage)
move_page(subpage)
concept = site.pages["Concept:{}/Games".format(original_name)]
if concept.exists:
edit_concept(concept)
move_page(concept)
pages = thispage.backlinks()
i = 0
for page in pages:
if i == limit:
break
i = i + 1
process_page(page)
print("Blank editing...")
if init_move:
for page in blank_edit_these:
blank_edit_page(page)
make_disambig_page()
print("Done! If some pages stalled out you may still need to abort manually.")
| [
"[email protected]"
] | |
fe8bc6b1e305d6f955d5f51ccdbcbb03567c3c5f | 33787153c4f85cb626cf16e0e4d40d5970df2871 | /reservation_system/order/admin.py | 873c45edd8890e68e2a1d428784197d437bc8309 | [] | no_license | nazaninsbr/E-Commerce-Website | 93e14576d69618f749d7fd6f19b6b1b989fa47a8 | 46bed8c58f4adac37dc4ddd881f57e694961b9b2 | refs/heads/master | 2021-09-22T14:20:56.940950 | 2018-09-11T05:01:23 | 2018-09-11T05:01:23 | 125,370,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | from django.contrib import admin
from .models import Order, OrderItem
class OrderItemInline(admin.TabularInline):
model = OrderItem
raw_id_fields = ['product']
class OrderAdmin(admin.ModelAdmin):
list_display = ['id', 'first_name', 'last_name', 'email', 'address', 'postal_code', 'department', 'paid', 'created',
'updated']
list_filter = ['paid', 'created', 'updated', 'last_name', 'department']
inlines = [OrderItemInline]
admin.site.register(Order, OrderAdmin) | [
"[email protected]"
] | |
99addf12f6477d42e6cc49078268d5e6afdf5f2d | d7016f69993570a1c55974582cda899ff70907ec | /sdk/devcenter/azure-developer-devcenter/samples/create_environment_sample.py | 637a5e178d90414987a7ffcd3707ca295f73c41e | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 3,239 | py | # coding=utf-8
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import logging
import os
from azure.developer.devcenter import DevCenterClient
from azure.identity import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
def main():
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger()
# Set the values of the dev center endpoint, client ID, and client secret of the AAD application as environment variables:
# DEVCENTER_ENDPOINT, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET
try:
endpoint = os.environ["DEVCENTER_ENDPOINT"]
except KeyError:
LOG.error("Missing environment variable 'DEVCENTER_ENDPOINT' - please set it before running the example")
exit()
# Build a client through AAD
client = DevCenterClient(endpoint, credential=DefaultAzureCredential())
# Fetch control plane resource dependencies
target_project_name = list(client.dev_center.list_projects(top=1))[0]['name']
target_catalog_item_name = list(client.environments.list_catalog_items(target_project_name, top=1))[0]['name']
target_environment_type_name = list(client.environments.list_environment_types(target_project_name, top=1))[0]['name']
# Stand up a new environment
create_response = client.environments.begin_create_environment(target_project_name,
"Dev_Environment",
{"catalogItemName": target_catalog_item_name, "environmentType": target_environment_type_name})
environment_result = create_response.result()
LOG.info(f"Provisioned environment with status {environment_result['provisioningState']}.")
# Tear down the environment when finished
delete_response = client.environments.begin_delete_environment(target_project_name, "Dev_Environment")
delete_response.wait()
LOG.info("Completed deletion for the environment.")
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
e48457d278580c6a39e5c28f37e35ec482015823 | 5b58a332c6bea0688d196aabedfc8ccc49bdd134 | /experiments/cnn_short_embeddings/get_embeddings.py | c2dfad97b2a1949c516d77ecfb2e88218654e2d3 | [] | no_license | ver228/classify_strains | 5420c2b3ea8e93b6ba46900c385f52f664f1cbd7 | dc61e7431410e25ab7c2da0acb6d090cc2ebaabb | refs/heads/master | 2021-09-20T08:52:14.505868 | 2018-08-07T12:26:22 | 2018-08-07T12:26:22 | 108,448,619 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,628 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 14:57:25 2017
@author: ajaver
"""
import sys
import os
import pandas as pd
import torch
import tqdm
import numpy as np
import tables
#Be sure to use abspath linux does not give the path if one uses __file__
_BASEDIR = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.join(_BASEDIR, os.pardir, os.pardir, 'src')
sys.path.append(src_dir)
from classify.flow import SkeletonsFlowFull, get_valid_strains, get_datset_file
import classify.models.model_w_embedding as models
if __name__ == '__main__':
dname = '/Users/ajaver/OneDrive - Imperial College London/classify_strains/trained_models'
#model_file = 'resnet18_w_emb_R_L256_l2_0.1_20171126_010058_best.pth.tar'
#props = dict(
# model_name = 'resnet18_w_emb',
# is_residual = True,
# embedding_size = 256
# )
model_file = 'simple_w_emb_R_L256_l2_0.01_20171126_010327_best.pth.tar'
props = dict(
model_name = 'simple_w_emb',
is_residual = True,
embedding_size = 256
)
    model_path = os.path.join(dname, model_file)
    dataset = 'CeNDR'
    valid_strains = get_valid_strains(dataset, is_reduced=True)
    data_file = get_datset_file(dataset)
    # output file for the computed embeddings, derived from the dataset file name
    embeddings_file = data_file.replace('_skel_smoothed.hdf5', '_embedings.hdf5')
gen = SkeletonsFlowFull(
n_batch = 32,
data_file = data_file,
sample_size_seconds = 10,
sample_frequency_s = 0.04,
valid_strains = valid_strains,
label_type = 'row_id',
is_return_snps = False,
transform_type = 'angles'
)
get_model_func = getattr(models, props['model_name'])
model = get_model_func(gen, props['embedding_size'])
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint['state_dict'])
model.eval()
results = []
for ii, (input_v, row_ids) in enumerate(tqdm.tqdm(gen)):
video_embedding = model.video_model(input_v)
pred = model.classification(video_embedding).max(1)[1]
dat = [x.data.numpy() for x in (row_ids, video_embedding, pred)]
results.append(dat)
#%%
row_ids, embeddings, predictions = map(np.concatenate, zip(*results))
df_g = pd.DataFrame(row_ids.T, columns=['row_id']).groupby('row_id').groups
embedding_groups = []
for irow, row in gen.skeletons_ranges.iterrows():
if irow in df_g:
row_n = row[['experiment_id', 'worm_index', 'strain', 'strain_id']].copy()
row_n['ini'] = df_g[irow].min()
row_n['fin'] = df_g[irow].max()
row_n['skel_group_id'] = irow
embedding_groups.append(row_n)
embedding_groups = pd.DataFrame(embedding_groups)
#%%
snps_embeddings = np.full((gen.n_classes, props['embedding_size']), np.nan, dtype=np.float32)
for strain_id in gen.strain_ids:
strain = gen.strain_codes.loc[strain_id, 'strain']
snps = gen.snps_data[strain].values.T.astype(np.float32)
snps = torch.from_numpy(snps).float()
snps = torch.autograd.Variable(snps)
snps_embedding = model.snp_mapper(snps)
snps_embeddings[strain_id] = snps_embedding.data.numpy()
#%%
TABLE_FILTERS = tables.Filters(
complevel=5,
complib='zlib',
shuffle=True,
fletcher32=True)
fields2copy = ['experiments_data', 'snps_data', 'strains_codes']
with tables.File(data_file, 'r') as fid_old, \
tables.File(embeddings_file, "w") as fid_new:
for field in fields2copy:
tab = fid_old.get_node('/' +field)[:]
fid_new.create_table('/',
field,
obj=tab,
filters=TABLE_FILTERS)
gg = fid_new.create_group('/', 'index_groups')
for field in ['train', 'test', 'val']:
tab = fid_old.get_node('/index_groups/' +field)[:]
fid_new.create_array('/index_groups',
field,
obj=tab)
#%%
with tables.File(embeddings_file, "r+") as fid:
table_type = np.dtype([('experiment_id', np.int32),
('worm_index', np.int32),
('strain', 'S10'),
('ini', np.int32),
('fin', np.int32),
('skel_group_id', np.int32)
])
em = embedding_groups[list(table_type.names)].to_records(index=False).astype(table_type)
fid.create_table('/',
'embedding_groups',
obj=em,
filters=TABLE_FILTERS)
fid.create_carray('/',
'video_embeddings',
obj=embeddings,
filters=TABLE_FILTERS)
fid.create_carray('/',
'predicted_strain_id',
obj=predictions,
filters=TABLE_FILTERS)
fid.create_carray('/',
'snps_embeddings',
obj=snps_embeddings,
filters=TABLE_FILTERS) | [
"[email protected]"
] | |
e563aed309c589208366c684765d85e75140080d | 78a28bd6b95041bfe67d8aa6a3a3c111911afaab | /03.Complete Python Developer - Zero to Mastery - AN/01.Python Basics/13.1 Exercise Repl.py | 1e15204ddbb52cbedf68211f4ad2a6edf0cfeb2c | [
"MIT"
] | permissive | jailukanna/Python-Projects-Dojo | 8200a60ab925bf796bd39cb1977e6f0e0a575c23 | 98c7234b84f0afea99a091c7198342d66bbdff5b | refs/heads/master | 2023-03-15T06:54:38.141189 | 2021-03-11T08:17:02 | 2021-03-11T08:17:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | counter = 0
counter += 1
counter += 1
counter += 1
counter += 1
counter -= 1
counter *= 2
#Before you click RUN, guess what the counter variable holds in memory!
print(counter)
#6 | [
"[email protected]"
] | |
26f999a48d12ee717570e8a1ae1e7e96e06c6f69 | 245b0329360b18c32510a6d13b2650fd6ca752cc | /ch03/trendline.py | 14d037ac047d819a1524cf921cf4e087b23e6a60 | [] | no_license | TanUkkii007/numpy-begginers-guide | 56c315d207f681bd4e6d70abeac82bfc0db2bad5 | 6d483bc8672947a06d4240c4379f00183da46d8b | refs/heads/master | 2021-01-17T08:08:07.113571 | 2016-06-29T16:45:59 | 2016-06-29T16:45:59 | 61,984,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | import numpy as np
import matplotlib.pyplot as plt
def fit_line(t,y):
'''Fits t to a line y = at + b'''
A = np.vstack([t, np.ones_like(t)]).T
return np.linalg.lstsq(A, y)[0]
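# Quick illustrative self-check of fit_line on synthetic, noise-free data
# (an addition for clarity, not part of the original example): the recovered
# slope and intercept should match y = 3t + 1.
_t_demo = np.arange(10)
_a_demo, _b_demo = fit_line(_t_demo, 3.0 * _t_demo + 1.0)
assert np.allclose([_a_demo, _b_demo], [3.0, 1.0])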
# Determine pivots
h,l,c = np.loadtxt('data.csv', delimiter=',', usecols=(4,5,6), unpack=True)
pivots = (h+l+c)/3
# Fit trend lines
t = np.arange(len(c))
sa, sb = fit_line(t, pivots - (h-l))
ra, rb = fit_line(t, pivots + (h-l))
support = sa*t + sb
resistance = ra*t + rb
condition = (c > support) & (c < resistance)
print("condition", condition)
between_bands = np.where(condition)
print("between_bands", between_bands)
print("support[between_bands]", support[between_bands])
print("c[between_bands]", c[between_bands])
print("resistance[between_bands]", resistance[between_bands])
n_between_bands = len(np.ravel(between_bands))
print("Number points between bands", between_bands)
print("Ratio between bands", float(n_between_bands)/len(c))
print("Tomorrows support", sa * (t[-1] + 1) + sb)
print("Tomorrows resistance", ra * (t[-1] + 1) + rb)
a1 = c[c > support]
a2 = c[c < resistance]
print("Number of points between bands 2nd approach" ,len(np.intersect1d(a1, a2)))
plt.plot(t, c, label='Data')
plt.plot(t, support, '--', lw=2.0, label='Support')
plt.plot(t, resistance, '-.', lw=3.0, label='Resistance')
plt.title('Trend Lines')
plt.xlabel('Days')
plt.ylabel('Price ($)')
plt.grid()
plt.legend()
plt.show()
| [
"[email protected]"
] | |
b45c9b02f05aa2a1d529bacf9ec69c1b70e7e35b | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/Data-Structures-Algos-Codebase/ALGO/__PYTHON/YT_DOWN.py | 2e47b44e59c00793d16628f53a9120871d31a7a4 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 310 | py | simport pafy
url = "https://www.youtube.com/watch?v=OE7wUUpJw6I&list=PL2_aWCzGMAwLPEZrZIcNEq9ukGWPfLT4A"
video = pafy.new(url)
print(video.title)
stream = video.streams  # reuse the video object created above instead of fetching the URL again
best=video.getbest()
for i in stream:
print(i)
print(best.resolution,best.extension)
print(best.url)
best.download(quiet=False)
| [
"[email protected]"
] | |
2e06b237f8de00ed2eda46de62f6d4a013feeb58 | 5989e503a733e8b29f4c502008446a75c2b43ff8 | /src/geofr/api/serializers.py | e6b9048e6acbf53169edb9aa681077ec2224fb2d | [] | no_license | samuelpath/aides-territoires | 399a6a7b0607ef5a8d2b327247446b239f5b1a42 | 5793bd49d7157a34e08c29e56a46e1e3ead0651f | refs/heads/master | 2022-12-20T14:35:18.671563 | 2020-08-21T08:00:33 | 2020-08-21T08:00:33 | 288,424,578 | 0 | 0 | null | 2020-08-18T10:27:17 | 2020-08-18T10:27:16 | null | UTF-8 | Python | false | false | 388 | py | from rest_framework import serializers
from geofr.models import Perimeter
class PerimeterSerializer(serializers.ModelSerializer):
id = serializers.CharField(source='id_slug')
scale = serializers.CharField(source='get_scale_display')
text = serializers.CharField(source='__str__')
class Meta:
model = Perimeter
fields = ('id', 'name', 'scale', 'text')
| [
"[email protected]"
] | |
c9b4bbec3144cad904dfdcd615ec193095982826 | 0d9de851c1300e3e32d0cc451c11056c8d66000b | /src/lib/simpleFunctions/__init__.py | c80d4494c99e377f9f42fe8c5b12f188feb330b1 | [
"MIT"
] | permissive | sankhaMukherjee/vtk | 3fa91641c832f4260514588425474a42dce51d75 | fc6ae8b4e56d62796a1a0d28e0c7dce598114103 | refs/heads/master | 2022-12-11T02:02:08.036116 | 2020-02-24T15:51:41 | 2020-02-24T15:51:41 | 232,733,822 | 0 | 0 | MIT | 2022-12-08T03:24:47 | 2020-01-09T05:59:01 | Python | UTF-8 | Python | false | false | 112 | py | """simpleFunctions
This contains simple functions that can be used for creating more
complex visualizations
""" | [
"[email protected]"
] | |
d9213d2c6b72b46b327ce6a9697cc507a3850b4e | c29cdc64f42eae3bc7d584a7b9b84961ce09da04 | /bitcoinx/__init__.py | d8c08b3d58a13838d07a60b0f1dbabaa6c15fe86 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | msinkec/bitcoinX | d379e32e9dc0f43700710032ad0ce7ffca2e398d | 26ec8a8a8c2a6423e0438dea9918c740c96f5eb2 | refs/heads/master | 2023-04-28T18:52:12.072225 | 2021-05-11T11:19:44 | 2021-05-11T11:19:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | from .address import *
from .base58 import *
from .bip32 import *
from .chain import *
from .consts import *
from .errors import *
from .hashes import *
from .interpreter import *
from .keys import *
from .misc import *
from .mnemonic import *
from .networks import *
from .packing import *
from .script import *
from .signature import *
from .tx import *
from .work import *
_version_str = '0.4.1'
_version = tuple(int(part) for part in _version_str.split('.'))
__all__ = sum((
address.__all__,
base58.__all__,
bip32.__all__,
chain.__all__,
consts.__all__,
errors.__all__,
hashes.__all__,
interpreter.__all__,
keys.__all__,
misc.__all__,
mnemonic.__all__,
networks.__all__,
packing.__all__,
script.__all__,
signature.__all__,
tx.__all__,
work.__all__,
), ())
| [
"[email protected]"
] | |
2db47ba68f2ea413bcfb86f6c1fe52f077823732 | 98b1956594921aeef6e4b3c0f5b15703c3eee6a7 | /atom/proton/python/proton_api/models/purchase_calculator_horizon_request.py | b7590bee1dea46714dec17a3678d38a86cb52947 | [
"Apache-2.0"
] | permissive | sumit4-ttn/SDK | d4db3dcac077e9c9508a8227010a2ab764c31023 | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | refs/heads/master | 2022-11-25T14:05:16.911068 | 2020-08-09T17:31:55 | 2020-08-09T17:31:55 | 286,413,715 | 0 | 0 | Apache-2.0 | 2020-08-10T08:03:04 | 2020-08-10T08:03:03 | null | UTF-8 | Python | false | false | 11,099 | py | # coding: utf-8
"""
Hydrogen Proton API
Financial engineering module of Hydrogen Atom # noqa: E501
OpenAPI spec version: 1.7.18
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PurchaseCalculatorHorizonRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'deposit_schedule': 'object',
'portfolio_return': 'float',
'investment_tax': 'float',
'inflation_rate': 'float',
'purchase_amount': 'float',
'aggregation_account_ids': 'list[str]',
'account_ids': 'list[str]',
'current_savings': 'float'
}
attribute_map = {
'deposit_schedule': 'deposit_schedule',
'portfolio_return': 'portfolio_return',
'investment_tax': 'investment_tax',
'inflation_rate': 'inflation_rate',
'purchase_amount': 'purchase_amount',
'aggregation_account_ids': 'aggregation_account_ids',
'account_ids': 'account_ids',
'current_savings': 'current_savings'
}
def __init__(self, deposit_schedule=None, portfolio_return=None, investment_tax=0.0, inflation_rate=0.0, purchase_amount=None, aggregation_account_ids=None, account_ids=None, current_savings=0.0): # noqa: E501
"""PurchaseCalculatorHorizonRequest - a model defined in Swagger""" # noqa: E501
self._deposit_schedule = None
self._portfolio_return = None
self._investment_tax = None
self._inflation_rate = None
self._purchase_amount = None
self._aggregation_account_ids = None
self._account_ids = None
self._current_savings = None
self.discriminator = None
if deposit_schedule is not None:
self.deposit_schedule = deposit_schedule
self.portfolio_return = portfolio_return
if investment_tax is not None:
self.investment_tax = investment_tax
if inflation_rate is not None:
self.inflation_rate = inflation_rate
self.purchase_amount = purchase_amount
if aggregation_account_ids is not None:
self.aggregation_account_ids = aggregation_account_ids
if account_ids is not None:
self.account_ids = account_ids
if current_savings is not None:
self.current_savings = current_savings
@property
def deposit_schedule(self):
"""Gets the deposit_schedule of this PurchaseCalculatorHorizonRequest. # noqa: E501
:return: The deposit_schedule of this PurchaseCalculatorHorizonRequest. # noqa: E501
:rtype: object
"""
return self._deposit_schedule
@deposit_schedule.setter
def deposit_schedule(self, deposit_schedule):
"""Sets the deposit_schedule of this PurchaseCalculatorHorizonRequest.
:param deposit_schedule: The deposit_schedule of this PurchaseCalculatorHorizonRequest. # noqa: E501
:type: object
"""
self._deposit_schedule = deposit_schedule
@property
def portfolio_return(self):
"""Gets the portfolio_return of this PurchaseCalculatorHorizonRequest. # noqa: E501
:return: The portfolio_return of this PurchaseCalculatorHorizonRequest. # noqa: E501
:rtype: float
"""
return self._portfolio_return
@portfolio_return.setter
def portfolio_return(self, portfolio_return):
"""Sets the portfolio_return of this PurchaseCalculatorHorizonRequest.
:param portfolio_return: The portfolio_return of this PurchaseCalculatorHorizonRequest. # noqa: E501
:type: float
"""
if portfolio_return is None:
raise ValueError("Invalid value for `portfolio_return`, must not be `None`") # noqa: E501
if portfolio_return is not None and portfolio_return < -1: # noqa: E501
raise ValueError("Invalid value for `portfolio_return`, must be a value greater than or equal to `-1`") # noqa: E501
self._portfolio_return = portfolio_return
@property
def investment_tax(self):
"""Gets the investment_tax of this PurchaseCalculatorHorizonRequest. # noqa: E501
:return: The investment_tax of this PurchaseCalculatorHorizonRequest. # noqa: E501
:rtype: float
"""
return self._investment_tax
@investment_tax.setter
def investment_tax(self, investment_tax):
"""Sets the investment_tax of this PurchaseCalculatorHorizonRequest.
:param investment_tax: The investment_tax of this PurchaseCalculatorHorizonRequest. # noqa: E501
:type: float
"""
if investment_tax is not None and investment_tax > 1: # noqa: E501
raise ValueError("Invalid value for `investment_tax`, must be a value less than or equal to `1`") # noqa: E501
if investment_tax is not None and investment_tax < 0: # noqa: E501
raise ValueError("Invalid value for `investment_tax`, must be a value greater than or equal to `0`") # noqa: E501
self._investment_tax = investment_tax
@property
def inflation_rate(self):
"""Gets the inflation_rate of this PurchaseCalculatorHorizonRequest. # noqa: E501
:return: The inflation_rate of this PurchaseCalculatorHorizonRequest. # noqa: E501
:rtype: float
"""
return self._inflation_rate
@inflation_rate.setter
def inflation_rate(self, inflation_rate):
"""Sets the inflation_rate of this PurchaseCalculatorHorizonRequest.
:param inflation_rate: The inflation_rate of this PurchaseCalculatorHorizonRequest. # noqa: E501
:type: float
"""
if inflation_rate is not None and inflation_rate < -1: # noqa: E501
raise ValueError("Invalid value for `inflation_rate`, must be a value greater than or equal to `-1`") # noqa: E501
self._inflation_rate = inflation_rate
@property
def purchase_amount(self):
"""Gets the purchase_amount of this PurchaseCalculatorHorizonRequest. # noqa: E501
:return: The purchase_amount of this PurchaseCalculatorHorizonRequest. # noqa: E501
:rtype: float
"""
return self._purchase_amount
@purchase_amount.setter
def purchase_amount(self, purchase_amount):
"""Sets the purchase_amount of this PurchaseCalculatorHorizonRequest.
:param purchase_amount: The purchase_amount of this PurchaseCalculatorHorizonRequest. # noqa: E501
:type: float
"""
if purchase_amount is None:
raise ValueError("Invalid value for `purchase_amount`, must not be `None`") # noqa: E501
if purchase_amount is not None and purchase_amount < 0: # noqa: E501
raise ValueError("Invalid value for `purchase_amount`, must be a value greater than or equal to `0`") # noqa: E501
self._purchase_amount = purchase_amount
@property
def aggregation_account_ids(self):
"""Gets the aggregation_account_ids of this PurchaseCalculatorHorizonRequest. # noqa: E501
:return: The aggregation_account_ids of this PurchaseCalculatorHorizonRequest. # noqa: E501
:rtype: list[str]
"""
return self._aggregation_account_ids
@aggregation_account_ids.setter
def aggregation_account_ids(self, aggregation_account_ids):
"""Sets the aggregation_account_ids of this PurchaseCalculatorHorizonRequest.
:param aggregation_account_ids: The aggregation_account_ids of this PurchaseCalculatorHorizonRequest. # noqa: E501
:type: list[str]
"""
self._aggregation_account_ids = aggregation_account_ids
@property
def account_ids(self):
"""Gets the account_ids of this PurchaseCalculatorHorizonRequest. # noqa: E501
:return: The account_ids of this PurchaseCalculatorHorizonRequest. # noqa: E501
:rtype: list[str]
"""
return self._account_ids
@account_ids.setter
def account_ids(self, account_ids):
"""Sets the account_ids of this PurchaseCalculatorHorizonRequest.
:param account_ids: The account_ids of this PurchaseCalculatorHorizonRequest. # noqa: E501
:type: list[str]
"""
self._account_ids = account_ids
@property
def current_savings(self):
"""Gets the current_savings of this PurchaseCalculatorHorizonRequest. # noqa: E501
:return: The current_savings of this PurchaseCalculatorHorizonRequest. # noqa: E501
:rtype: float
"""
return self._current_savings
@current_savings.setter
def current_savings(self, current_savings):
"""Sets the current_savings of this PurchaseCalculatorHorizonRequest.
:param current_savings: The current_savings of this PurchaseCalculatorHorizonRequest. # noqa: E501
:type: float
"""
if current_savings is not None and current_savings < 0: # noqa: E501
raise ValueError("Invalid value for `current_savings`, must be a value greater than or equal to `0`") # noqa: E501
self._current_savings = current_savings
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PurchaseCalculatorHorizonRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PurchaseCalculatorHorizonRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
ed146779588cd7189d51bb5d520952ed956f518a | 7ff0077a55f6bf4a74704e430f354aeabaae3e0b | /tensorflow_probability/python/distributions/relaxed_onehot_categorical_test.py | a92246b95c13587324e028dd9841e11f232c49ad | [
"Apache-2.0"
] | permissive | markaduol/probability | 50a1d97810d11c747bd9546f977b2937c9e04d78 | 8af21dff96502a5bdc01b1be2c595043a3efc5d1 | refs/heads/master | 2020-03-29T20:50:26.001297 | 2018-09-25T21:51:10 | 2018-09-25T21:51:50 | 150,333,784 | 0 | 1 | Apache-2.0 | 2018-09-25T21:54:49 | 2018-09-25T21:54:49 | null | UTF-8 | Python | false | false | 6,579 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Relaxed One-Hot Categorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy.special import gamma
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import test_util
tfd = tfp.distributions
def make_relaxed_categorical(batch_shape, num_classes, dtype=tf.float32):
logits = tf.random_uniform(
list(batch_shape) + [num_classes], -10, 10, dtype=dtype) - 50.
temperatures = tf.random_uniform(list(batch_shape), 0.1, 10, dtype=tf.float32)
return tfd.RelaxedOneHotCategorical(temperatures, logits)
@test_util.run_all_in_graph_and_eager_modes
class ExpRelaxedOneHotCategoricalTest(tf.test.TestCase):
def testP(self):
temperature = 1.0
logits = [2.0, 3.0, -4.0]
dist = tfd.ExpRelaxedOneHotCategorical(temperature, logits)
expected_p = np.exp(logits)/np.sum(np.exp(logits))
self.assertAllClose(expected_p, self.evaluate(dist.probs))
self.assertAllEqual([3], dist.probs.get_shape())
def testPdf(self):
temperature = .4
logits = [.3, .1, .4]
k = len(logits)
p = np.exp(logits)/np.sum(np.exp(logits))
dist = tfd.ExpRelaxedOneHotCategorical(temperature, logits)
x = self.evaluate(dist.sample())
# analytical ExpConcrete density presented in Maddison et al. 2016
prod_term = p * np.exp(-temperature * x)
expected_pdf = (
gamma(k) * np.power(temperature, k - 1) * np.prod(
prod_term / np.sum(prod_term)))
pdf = self.evaluate(dist.prob(x))
self.assertAllClose(expected_pdf, pdf)
@test_util.run_all_in_graph_and_eager_modes
class RelaxedOneHotCategoricalTest(tf.test.TestCase):
def testLogits(self):
temperature = 1.0
logits = [2.0, 3.0, -4.0]
dist = tfd.RelaxedOneHotCategorical(temperature, logits)
# check p for ExpRelaxed base distribution
self.assertAllClose(logits, self.evaluate(dist._distribution.logits))
self.assertAllEqual([3], dist._distribution.logits.get_shape())
def testSample(self):
temperature = 1.4
# single logit
logits = [.3, .1, .4]
dist = tfd.RelaxedOneHotCategorical(temperature, logits)
self.assertAllEqual([3], self.evaluate(dist.sample()).shape)
self.assertAllEqual([5, 3], self.evaluate(dist.sample(5)).shape)
# multiple distributions
logits = [[2.0, 3.0, -4.0], [.3, .1, .4]]
dist = tfd.RelaxedOneHotCategorical(temperature, logits)
self.assertAllEqual([2, 3], self.evaluate(dist.sample()).shape)
self.assertAllEqual([5, 2, 3], self.evaluate(dist.sample(5)).shape)
# multiple distributions
logits = np.random.uniform(size=(4, 1, 3)).astype(np.float32)
dist = tfd.RelaxedOneHotCategorical(temperature, logits)
self.assertAllEqual([4, 1, 3], self.evaluate(dist.sample()).shape)
self.assertAllEqual([5, 4, 1, 3], self.evaluate(dist.sample(5)).shape)
def testPdf(self):
def analytical_pdf(x, temperature, logits):
# analytical density of RelaxedOneHotCategorical
temperature = np.reshape(temperature, (-1, 1))
if len(x.shape) == 1:
x = np.expand_dims(x, 0)
k = logits.shape[1]
p = np.exp(logits)/np.sum(np.exp(logits), axis=1, keepdims=True)
term1 = gamma(k)*np.power(temperature, k-1)
term2 = np.sum(p/(np.power(x, temperature)), axis=1, keepdims=True)
term3 = np.prod(p/(np.power(x, temperature+1)), axis=1, keepdims=True)
expected_pdf = term1*np.power(term2, -k)*term3
return expected_pdf
temperature = .4
logits = np.array([[.3, .1, .4]]).astype(np.float32)
dist = tfd.RelaxedOneHotCategorical(temperature, logits)
x = self.evaluate(dist.sample())
pdf = self.evaluate(dist.prob(x))
expected_pdf = analytical_pdf(x, temperature, logits)
self.assertAllClose(expected_pdf.flatten(), pdf, rtol=1e-4)
# variable batch size
logits = np.array([[.3, .1, .4], [.6, -.1, 2.]]).astype(np.float32)
temperatures = np.array([0.4, 2.3]).astype(np.float32)
dist = tfd.RelaxedOneHotCategorical(temperatures, logits)
x = self.evaluate(dist.sample())
pdf = self.evaluate(dist.prob(x))
expected_pdf = analytical_pdf(x, temperatures, logits)
self.assertAllClose(expected_pdf.flatten(), pdf, rtol=1e-4)
def testShapes(self):
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_relaxed_categorical(batch_shape, 10)
self.assertAllEqual(batch_shape, dist.batch_shape.as_list())
self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))
self.assertAllEqual([10], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([10], self.evaluate(dist.event_shape_tensor()))
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_relaxed_categorical(batch_shape,
tf.constant(10, dtype=tf.int32))
self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))
self.assertAllEqual([10], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([10], self.evaluate(dist.event_shape_tensor()))
def testUnknownShape(self):
logits_pl = tf.placeholder_with_default(input=[.3, .1, .4], shape=None)
temperature = 1.0
dist = tfd.ExpRelaxedOneHotCategorical(temperature, logits_pl)
self.assertAllEqual([3], self.evaluate(dist.sample()).shape)
self.assertAllEqual([5, 3], self.evaluate(dist.sample(5)).shape)
def testDTypes(self):
# check that sampling and log_prob work for a range of dtypes
for dtype in (tf.float16, tf.float32, tf.float64):
logits = tf.random_uniform(shape=[3, 3], dtype=dtype)
dist = tfd.RelaxedOneHotCategorical(temperature=0.5, logits=logits)
dist.log_prob(dist.sample())
if __name__ == "__main__":
tf.test.main()
| [
"[email protected]"
] | |
6741d8af6bd64f44ce8282efeb4594958db494e4 | 1fc9a12c86be4e440f4f0d95c8b871c79df07545 | /ML Libariries/python_pandas/dataframe_row.py | b1f101d5632b2dcc45428ad701ace69103a14311 | [] | no_license | Rohit-Gupta-Web3/Articles | a56e7f1b36d6d5efd846eec2e1a4036716ac16eb | 0f584916e065059d4dd1e95e7de874a7830fdff4 | refs/heads/master | 2023-05-14T02:50:17.817951 | 2020-07-26T06:44:10 | 2020-07-26T06:44:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | import pandas as pd
data = {'Name':['C','Sharp','Corner'], 'Age':[20,21,22], 'Address':['Delhi','Kanpur','Tamil Nadu']}
df = pd.DataFrame(data)
data1= df.loc[0]
print(data1)
| [
"[email protected]"
] |