blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3 to 616) | content_id (string, length 40) | detected_licenses (sequence, length 0 to 112) | license_type (string, 2 classes) | repo_name (string, length 5 to 115) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, length 3 to 10.2M) | authors (sequence, length 1) | author_id (string, length 1 to 132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0f0419366d770abc9585bc6c697d5ec7c3484d0b | 000a4b227d970cdc6c8db192f4437698cb782721 | /python/helpers/typeshed/stubs/pytz/pytz/tzinfo.pyi | c2c68526870de2234b2bef2a4957d2805ca999af | [
"Apache-2.0",
"MIT"
] | permissive | trinhanhngoc/intellij-community | 2eb2f66a2a3a9456e7a0c5e7be1eaba03c38815d | 1d4a962cfda308a73e0a7ef75186aaa4b15d1e17 | refs/heads/master | 2022-11-03T21:50:47.859675 | 2022-10-19T16:39:57 | 2022-10-19T23:25:35 | 205,765,945 | 1 | 0 | Apache-2.0 | 2019-09-02T02:55:15 | 2019-09-02T02:55:15 | null | UTF-8 | Python | false | false | 2,045 | pyi | import datetime
from abc import abstractmethod
from typing import Any
class BaseTzInfo(datetime.tzinfo):
zone: str | None # Actually None but should be set on concrete subclasses
# The following abstract methods don't exist in the implementation, but
# are implemented by all sub-classes.
@abstractmethod
def localize(self, dt: datetime.datetime) -> datetime.datetime: ...
@abstractmethod
def normalize(self, dt: datetime.datetime) -> datetime.datetime: ...
@abstractmethod
def tzname(self, dt: datetime.datetime | None) -> str: ...
@abstractmethod
def utcoffset(self, dt: datetime.datetime | None) -> datetime.timedelta | None: ...
@abstractmethod
def dst(self, dt: datetime.datetime | None) -> datetime.timedelta | None: ...
class StaticTzInfo(BaseTzInfo):
def fromutc(self, dt: datetime.datetime) -> datetime.datetime: ...
def localize(self, dt: datetime.datetime, is_dst: bool | None = ...) -> datetime.datetime: ...
def normalize(self, dt: datetime.datetime, is_dst: bool | None = ...) -> datetime.datetime: ...
def tzname(self, dt: datetime.datetime | None, is_dst: bool | None = ...) -> str: ...
def utcoffset(self, dt: datetime.datetime | None, is_dst: bool | None = ...) -> datetime.timedelta: ...
def dst(self, dt: datetime.datetime | None, is_dst: bool | None = ...) -> datetime.timedelta: ...
class DstTzInfo(BaseTzInfo):
def __init__(self, _inf: Any = ..., _tzinfos: Any = ...) -> None: ...
def fromutc(self, dt: datetime.datetime) -> datetime.datetime: ...
def localize(self, dt: datetime.datetime, is_dst: bool | None = ...) -> datetime.datetime: ...
def normalize(self, dt: datetime.datetime) -> datetime.datetime: ...
def tzname(self, dt: datetime.datetime | None, is_dst: bool | None = ...) -> str: ...
def utcoffset(self, dt: datetime.datetime | None, is_dst: bool | None = ...) -> datetime.timedelta | None: ...
def dst(self, dt: datetime.datetime | None, is_dst: bool | None = ...) -> datetime.timedelta | None: ...
| [
"[email protected]"
] | |
02753e92896aace5a8746356bcdc3beaf50d75d0 | 255e90b6e98753c1b7530279064a9cf5da1a6866 | /tests/migrations/0003_radiology_lab_name.py | 7fd80843e89a480259326bde399b61a4a0ecdc38 | [] | no_license | MindSparkTm/clinicmanagement | 31b5f66552da3cf51f900e2fd8a75c6e8228c56c | 5c327126af75d342890645ead7dd835ef45111f7 | refs/heads/master | 2020-03-31T22:18:14.201928 | 2018-05-07T10:45:56 | 2018-05-07T10:45:56 | 152,613,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-04-30 10:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tests', '0002_auto_20180301_2340'),
]
operations = [
migrations.AddField(
model_name='radiology',
name='lab_name',
field=models.TextField(blank=True, max_length=100, null=True),
),
]
| [
"[email protected]"
] | |
872419719c7dbbacb98e8fe230dda1d95c9a3bb5 | effab126713f246b35f43da6e24060fb5dbf7335 | /dpxdt/server/utils.py | 965851c3ad359459aff494c7ace9ec690aa87908 | [
"Apache-2.0"
] | permissive | jujugrrr/dpxdt | 0a4552a2a87739e972960016881a36b6cd31648d | ee579f6027d0349e971a3eab070dad5756c54dcd | refs/heads/master | 2021-01-23T20:38:32.729314 | 2013-07-01T05:38:24 | 2013-07-01T05:38:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,478 | py | #!/usr/bin/env python
# Copyright 2013 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions."""
import base64
import hashlib
import datetime
import logging
import traceback
import uuid
# Local libraries
import flask
from flask import abort, g, jsonify
# Local modules
from . import app
def jsonify_assert(asserted, message, status_code=400):
"""Asserts something is true, aborts the request if not."""
if asserted:
return
try:
raise AssertionError(message)
except AssertionError, e:
stack = traceback.extract_stack()
stack.pop()
logging.error('Assertion failed: %s\n%s',
str(e), ''.join(traceback.format_list(stack)))
abort(jsonify_error(e, status_code=status_code))
def jsonify_error(message_or_exception, status_code=400):
"""Returns a JSON payload that indicates the request had an error."""
if isinstance(message_or_exception, Exception):
message = '%s: %s' % (
message_or_exception.__class__.__name__, message_or_exception)
else:
message = message_or_exception
response = jsonify(error=message)
response.status_code = status_code
return response
# Based on http://flask.pocoo.org/snippets/33/
@app.template_filter()
def timesince(when):
"""Returns string representing "time since" or "time until".
Examples:
3 days ago, 5 hours ago, 3 minutes from now, 5 hours from now, now.
"""
if not when:
return ''
now = datetime.datetime.utcnow()
if now > when:
diff = now - when
suffix = 'ago'
else:
diff = when - now
suffix = 'from now'
periods = (
(diff.days / 365, 'year', 'years'),
(diff.days / 30, 'month', 'months'),
(diff.days / 7, 'week', 'weeks'),
(diff.days, 'day', 'days'),
(diff.seconds / 3600, 'hour', 'hours'),
(diff.seconds / 60, 'minute', 'minutes'),
(diff.seconds, 'second', 'seconds'),
)
for period, singular, plural in periods:
if period:
return '%d %s %s' % (
period,
singular if period == 1 else plural,
suffix)
return 'now'
def human_uuid():
"""Returns a good UUID for using as a human readable string."""
return base64.b32encode(
hashlib.sha1(uuid.uuid4().bytes).digest()).lower().strip('=')
def password_uuid():
"""Returns a good UUID for using as a password."""
return base64.b64encode(
hashlib.sha1(uuid.uuid4().bytes).digest()).strip('=')
# From http://flask.pocoo.org/snippets/53/
def after_this_request(func):
if not hasattr(g, 'call_after_request'):
g.call_after_request = []
g.call_after_request.append(func)
return func
@app.after_request
def per_request_callbacks(response):
for func in getattr(g, 'call_after_request', ()):
response = func(response)
return response
| [
"[email protected]"
] | |
fe07dd098da9c672cc8538933b82862e8803d26b | 5cee94279b59f56c39987b94d4a85ca8a4f6966d | /app/asset/urls.py | 0a25a3c193a2987565c8e0ea2c27cd6c82ca1265 | [
"MIT"
] | permissive | S3Infosoft/s3-dam | 19c10965dfb17d20c08f0e78b3b096febd646a96 | 67488be012d42cf5826350bff218db2bde70c5e5 | refs/heads/master | 2021-05-18T03:53:35.365250 | 2020-05-14T13:34:02 | 2020-05-14T13:34:02 | 251,092,565 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | from django.urls import path
from . import views
urlpatterns = [
path("uploadPhoto", views.uploadPhoto, name="uploadPhoto"),
path("uploadDocument/", views.uploadDocument, name="uploadDocument"),
path("viewDocument/", views.viewDocument, name="viewDocument"),
]
| [
"[email protected]"
] | |
f9b1b694244da3da3ffd59310e2ccfbf529dcd42 | 497e25618ccb09b6f237bb99400d1595d86e15ab | /src/12_ItemSetsGenerator.py | 6d595fbcc263d9b28b8df68fd1bce336abd6f64b | [
"CC-BY-4.0"
] | permissive | curation-library-t/dataset2 | dec966efb8b7ba1f2f94c69a293c6272df8ebcd5 | 681bd425a34d5ca04888e1c1bceefdf69008365d | refs/heads/master | 2021-01-03T10:03:43.702735 | 2020-03-10T04:03:25 | 2020-03-10T04:03:25 | 240,017,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | import sys
import urllib
import json
import argparse
import urllib.request
import os
import yaml
import shutil
dir = "../docs/omeka/item_sets"
if os.path.exists(dir):
shutil.rmtree(dir)
os.makedirs(dir, exist_ok=True)
def item_sets_generator():
f = open("settings.yml", "r+")
data = yaml.load(f)
api_url = data["api_url"]
loop_flg = True
page = 1
while loop_flg:
url = api_url + "/item_sets?page=" + str(
page)
print(url)
page += 1
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
response_body = response.read().decode("utf-8")
data = json.loads(response_body.split('\n')[0])
if len(data) > 0:
for i in range(len(data)):
obj = data[i]
oid = str(obj["o:id"])
with open(dir+"/"+oid+".json", 'w') as outfile:
json.dump(obj, outfile, ensure_ascii=False,
indent=4, sort_keys=True, separators=(',', ': '))
else:
loop_flg = False
if __name__ == "__main__":
item_sets_generator()
| [
"[email protected]"
] | |
130b47b6f853783598aaa0f501090f289177b6d9 | d2c229f74a3ca61d6a22f64de51215d9e30c5c11 | /test/python/circuit/library/test_permutation.py | bf4da582b6ad52f9dbf9fc95e2ab2575ecbaf4ea | [
"Apache-2.0"
] | permissive | 1ucian0/qiskit-terra | 90e8be8a7b392fbb4b3aa9784c641a818a180e4c | 0b51250e219ca303654fc28a318c21366584ccd3 | refs/heads/main | 2023-08-31T07:50:33.568824 | 2023-08-22T01:52:53 | 2023-08-22T01:52:53 | 140,555,676 | 6 | 1 | Apache-2.0 | 2023-09-14T13:21:54 | 2018-07-11T09:52:28 | Python | UTF-8 | Python | false | false | 7,222 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test permutation quantum circuits, permutation gates, and quantum circuits that
contain permutation gates."""
import io
import unittest
import numpy as np
from qiskit import QuantumRegister
from qiskit.test.base import QiskitTestCase
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.exceptions import CircuitError
from qiskit.circuit.library import Permutation, PermutationGate
from qiskit.quantum_info import Operator
from qiskit.qpy import dump, load
class TestPermutationLibrary(QiskitTestCase):
"""Test library of permutation logic quantum circuits."""
def test_permutation(self):
"""Test permutation circuit."""
circuit = Permutation(num_qubits=4, pattern=[1, 0, 3, 2])
expected = QuantumCircuit(4)
expected.swap(0, 1)
expected.swap(2, 3)
expected = Operator(expected)
simulated = Operator(circuit)
self.assertTrue(expected.equiv(simulated))
def test_permutation_bad(self):
"""Test that [0,..,n-1] permutation is required (no -1 for last element)."""
self.assertRaises(CircuitError, Permutation, 4, [1, 0, -1, 2])
class TestPermutationGate(QiskitTestCase):
"""Tests for the PermutationGate class."""
def test_permutation(self):
"""Test that Operator can be constructed."""
perm = PermutationGate(pattern=[1, 0, 3, 2])
expected = QuantumCircuit(4)
expected.swap(0, 1)
expected.swap(2, 3)
expected = Operator(expected)
simulated = Operator(perm)
self.assertTrue(expected.equiv(simulated))
def test_permutation_bad(self):
"""Test that [0,..,n-1] permutation is required (no -1 for last element)."""
self.assertRaises(CircuitError, PermutationGate, [1, 0, -1, 2])
def test_permutation_array(self):
"""Test correctness of the ``__array__`` method."""
perm = PermutationGate([1, 2, 0])
# The permutation pattern means q1->q0, q2->q1, q0->q2, or equivalently
# q0'=q1, q1'=q2, q2'=q0, where the primed values are the values after the
# permutation. The following matrix is the expected unitary matrix for this.
# As an example, the second column represents the result of applying
# the permutation to |001>, i.e. to q2=0, q1=0, q0=1. We should get
# q2'=q0=1, q1'=q2=0, q0'=q1=0, that is the state |100>. This corresponds
# to the "1" in the 5 row.
expected_op = np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1],
]
)
self.assertTrue(np.array_equal(perm.__array__(dtype=int), expected_op))
def test_pattern(self):
"""Test the ``pattern`` method."""
pattern = [1, 3, 5, 0, 4, 2]
perm = PermutationGate(pattern)
self.assertTrue(np.array_equal(perm.pattern, pattern))
def test_inverse(self):
"""Test correctness of the ``inverse`` method."""
perm = PermutationGate([1, 3, 5, 0, 4, 2])
# We have the permutation 1->0, 3->1, 5->2, 0->3, 4->4, 2->5.
# The inverse permutations is 0->1, 1->3, 2->5, 3->0, 4->4, 5->2, or
# after reordering 3->0, 0->1, 5->2, 1->3, 4->4, 2->5.
inverse_perm = perm.inverse()
expected_inverse_perm = PermutationGate([3, 0, 5, 1, 4, 2])
self.assertTrue(np.array_equal(inverse_perm.pattern, expected_inverse_perm.pattern))
class TestPermutationGatesOnCircuit(QiskitTestCase):
"""Tests for quantum circuits containing permutations."""
def test_append_to_circuit(self):
"""Test method for adding Permutations to quantum circuit."""
qc = QuantumCircuit(5)
qc.append(PermutationGate([1, 2, 0]), [0, 1, 2])
qc.append(PermutationGate([2, 3, 0, 1]), [1, 2, 3, 4])
self.assertIsInstance(qc.data[0].operation, PermutationGate)
self.assertIsInstance(qc.data[1].operation, PermutationGate)
def test_inverse(self):
"""Test inverse method for circuits with permutations."""
qc = QuantumCircuit(5)
qc.append(PermutationGate([1, 2, 3, 0]), [0, 4, 2, 1])
qci = qc.inverse()
qci_pattern = qci.data[0].operation.pattern
expected_pattern = [3, 0, 1, 2]
# The inverse permutations should be defined over the same qubits but with the
# inverse permutation pattern.
self.assertTrue(np.array_equal(qci_pattern, expected_pattern))
self.assertTrue(np.array_equal(qc.data[0].qubits, qci.data[0].qubits))
def test_reverse_ops(self):
"""Test reverse_ops method for circuits with permutations."""
qc = QuantumCircuit(5)
qc.append(PermutationGate([1, 2, 3, 0]), [0, 4, 2, 1])
qcr = qc.reverse_ops()
# The reversed circuit should have the permutation gate with the same pattern and over the
# same qubits.
self.assertTrue(np.array_equal(qc.data[0].operation.pattern, qcr.data[0].operation.pattern))
self.assertTrue(np.array_equal(qc.data[0].qubits, qcr.data[0].qubits))
def test_conditional(self):
"""Test adding conditional permutations."""
qc = QuantumCircuit(5, 1)
qc.append(PermutationGate([1, 2, 0]), [2, 3, 4]).c_if(0, 1)
self.assertIsNotNone(qc.data[0].operation.condition)
def test_qasm(self):
"""Test qasm for circuits with permutations."""
qr = QuantumRegister(5, "q0")
circuit = QuantumCircuit(qr)
pattern = [2, 4, 3, 0, 1]
permutation = PermutationGate(pattern)
circuit.append(permutation, [0, 1, 2, 3, 4])
circuit.h(qr[0])
expected_qasm = (
"OPENQASM 2.0;\n"
'include "qelib1.inc";\n'
"gate permutation__2_4_3_0_1_ q0,q1,q2,q3,q4 { swap q2,q3; swap q1,q4; swap q0,q3; }\n"
"qreg q0[5];\n"
"permutation__2_4_3_0_1_ q0[0],q0[1],q0[2],q0[3],q0[4];\n"
"h q0[0];\n"
)
self.assertEqual(expected_qasm, circuit.qasm())
def test_qpy(self):
"""Test qpy for circuits with permutations."""
circuit = QuantumCircuit(6, 1)
circuit.cx(0, 1)
circuit.append(PermutationGate([1, 2, 0]), [2, 4, 5])
circuit.h(4)
print(circuit)
qpy_file = io.BytesIO()
dump(circuit, qpy_file)
qpy_file.seek(0)
new_circuit = load(qpy_file)[0]
self.assertEqual(circuit, new_circuit)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
bd7b345b44537b935cb1c7c58541864a9bd03afd | f9ff85c981942d15c65d37de107e0c5fa5e6a2ba | /pychron/experiment/image_browser.py | b81d4c45f055dacb8347d04b5f6d48c148e91c72 | [
"Apache-2.0"
] | permissive | kenlchen/pychron | 0c729f1b1973b9883734007b7a318fe21669e6c1 | ffd988e27ae09fb3e8a8790d87ff611557911d07 | refs/heads/master | 2021-01-24T21:53:42.293554 | 2016-04-04T07:18:39 | 2016-04-04T07:18:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,888 | py | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import HasTraits, Instance, List, Str, Bool, on_trait_change, String, \
Button, Dict, Any
from traitsui.api import View, Item, ListStrEditor, HGroup, VGroup, \
spring, VSplit, Group
from chaco.api import ArrayPlotData, Plot, HPlotContainer
from chaco.tools.image_inspector_tool import ImageInspectorOverlay, \
ImageInspectorTool
from chaco.tools.api import ZoomTool, PanTool
from enable.component_editor import ComponentEditor
from enable.component import Component
# ============= standard library imports ========================
import Image
from numpy import array
import os
import httplib
# ============= local library imports ==========================
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.database.isotope_database_manager import IsotopeDatabaseManager
from pychron.paths import paths
PORT = 8083
# TEST_IMAGE = Image.open(open('/Users/ross/Sandbox/snapshot001.jpg'))
# TEST_IMAGE = ImageData.fromfile('/Users/ross/Sandbox/foo.png')
class ImageContainer(HasTraits):
container = Instance(HPlotContainer, ())
name = String
def traits_view(self):
v = View(VGroup(
HGroup(spring, CustomLabel('name', color='maroon', size=16,
height=-25,
width=100,
), spring),
Item('container', show_label=False, editor=ComponentEditor()),
))
return v
class ImageSpec(HasTraits):
name = Str
note = Str
def traits_view(self):
v = View(VGroup(Item('name'),
Group(
Item('note', style='custom', show_label=False),
show_border=True,
label='Note'
)
)
)
return v
class ImageEditor(HasTraits):
names = List
selected = Str
save_db = Button('Save to DB')
image_spec = Instance(ImageSpec)
image_specs = Dict
db = Any
# ===============================================================================
# handlers
# ===============================================================================
def _selected_changed(self):
if self.selected in self.image_specs:
spec = self.image_specs[self.selected]
else:
spec = ImageSpec(name=self.selected)
self.image_specs[self.selected] = spec
self.image_spec = spec
def _save_db_fired(self):
db = self.db
print db
def traits_view(self):
v = View(
VSplit(
Item('names', show_label=False,
editor=ListStrEditor(editable=False,
selected='selected',
operations=[]
),
height=0.6
),
Item('image_spec', show_label=False, style='custom',
height=0.4
)
),
Item('save_db', show_label=False)
)
return v
class ImageBrowser(IsotopeDatabaseManager):
# db = Instance(IsotopeAdapter)
image_container = Instance(ImageContainer, ())
image_editor = Instance(ImageEditor)
plot = Instance(Component)
# names = List
# selected = Str
use_cache = Bool(True)
cache_dir = paths.image_cache_dir
_conn = None
def _image_editor_default(self):
im = ImageEditor(db=self.db)
return im
def _is_cached(self, p):
p = os.path.join(self.cache_dir, p)
return os.path.isfile(p)
def load_from_remote_source(self, name):
if self._is_cached(name):
data = self._get_cached(name)
else:
data = self._get_remote_file(name)
self._load_image_data(data)
def load_remote_directory(self, name):
self.info('retrieve contents of remote directory {}'.format(name))
resp = self._get(name)
if resp:
htxt = resp.read()
for li in htxt.split('\n'):
if li.startswith('<li>'):
args = li[4:].split('>')
name, _tail = args[1].split('<')
self.image_editor.names.append(name)
return True
def _connection_factory(self, reset=False):
if reset or self._conn is None:
host, port = 'localhost', 8081
url = '{}:{}'.format(host, port)
conn = httplib.HTTPConnection(url)
else:
conn = self._conn
self._conn = conn
return conn
# def _get(self, name):
# conn = self._connection_factory()
# conn.request('GET', '/{}'.format(name))
# return conn.getresponse()
# def _get_remote_file(self, name):
# self.info('retrieve {} from remote directory'.format(name))
# resp = self._get(name)
#
# buf = StringIO()
# buf.write(resp.read())
# buf.seek(0)
# im = Image.open(buf)
# im = im.convert('RGB')
#
# if self.use_cache:
# buf.seek(0)
# if os.path.isdir(self.cache_dir):
# with open(os.path.join(self.cache_dir, name), 'w') as fp:
# fp.write(buf.read())
# else:
# self.info('cache directory does not exist. {}'.format(self.cache_dir))
#
# buf.close()
#
# return array(im)
def _get_cached(self, name):
self.info('retrieve {} from cache directory'.format(name))
p = os.path.join(self.cache_dir, name)
with open(p, 'r') as rfile:
im = Image.open(rfile)
im = im.convert('RGB')
return array(im)
def _load_image_data(self, data):
cont = HPlotContainer()
pd = ArrayPlotData()
plot = Plot(data=pd, padding=[30, 5, 5, 30], default_origin='top left')
pd.set_data('img', data)
img_plot = plot.img_plot('img',
)[0]
self._add_inspector(img_plot)
self._add_tools(img_plot)
cont.add(plot)
cont.request_redraw()
self.image_container.container = cont
def _add_inspector(self, img_plot):
imgtool = ImageInspectorTool(img_plot)
img_plot.tools.append(imgtool)
overlay = ImageInspectorOverlay(component=img_plot, image_inspector=imgtool,
bgcolor="white", border_visible=True)
img_plot.overlays.append(overlay)
#
def _add_tools(self, img_plot):
zoom = ZoomTool(component=img_plot, tool_mode="box", always_on=False)
pan = PanTool(component=img_plot, restrict_to_data=True)
img_plot.tools.append(pan)
img_plot.overlays.append(zoom)
# ===============================================================================
# handlers
# ===============================================================================
@on_trait_change('image_editor:selected')
def _selected_changed(self):
sel = self.image_editor.selected
if sel:
self.load_from_remote_source(sel)
self.image_container.name = sel
def traits_view(self):
v = View(
HGroup(
Item('image_editor', show_label=False, style='custom',
width=0.3
),
# Item('names', show_label=False, editor=ListStrEditor(editable=False,
# selected='selected',
# operations=[]
# ),
# width=0.3,
# ),
Item('image_container', style='custom',
width=0.7,
show_label=False)
),
# Item('container', show_label=False,
# width=0.7,
# editor=ComponentEditor())),
resizable=True,
height=800,
width=900
)
return v
if __name__ == '__main__':
from pychron.core.helpers.logger_setup import logging_setup
logging_setup('image_viewer')
im = ImageBrowser(cache_dir='/Users/ross/Sandbox/cache')
im.load_remote_directory('')
# im.load_from_remote_source('raster2.png')
# im.load_remote_directory()
# im.names = 'snapshot001.jpg,snapshot002.jpg,snapshot003.jpg,snapshot004.jpg'.split(',')
# im.load_from_remote_source('foo')
# im.load_image_from_file('/Users/ross/Sandbox/diodefailsnapshot.jpg')
im.configure_traits()
# ============= EOF =============================================
| [
"[email protected]"
] | |
2a04439769c443bb7d6866e0bfea6b0721b05f7e | 8f1137592d670ce134821106f736e231b03ead87 | /tools/train.py | 21516fd5c10c33bf28186ffedadee978be1c9997 | [
"MIT"
] | permissive | mousecpn/DMC-Domain-Generalization-for-Underwater-Object-Detection | fa426c834fa2a5cd2fe98c50dd4dfeda64fcdc79 | 133797cfb7553557fb81a37e3c99c88154a13765 | refs/heads/master | 2023-05-23T16:49:34.795363 | 2023-02-13T02:23:31 | 2023-02-13T02:23:31 | 501,597,077 | 16 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,020 | py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
import warnings
warnings.filterwarnings("ignore")
main()
| [
"[email protected]"
] | |
97113931b94c55421f7eaf0342e8779940eeaccc | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-ccm/huaweicloudsdkccm/v1/model/distinguished_name.py | 9b1dc6bbb69cd65ec3602053c227ff83175ff3b8 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,640 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DistinguishedName:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'common_name': 'str',
'country': 'str',
'locality': 'str',
'organization': 'str',
'organizational_unit': 'str',
'state': 'str'
}
attribute_map = {
'common_name': 'common_name',
'country': 'country',
'locality': 'locality',
'organization': 'organization',
'organizational_unit': 'organizational_unit',
'state': 'state'
}
def __init__(self, common_name=None, country=None, locality=None, organization=None, organizational_unit=None, state=None):
"""DistinguishedName - a model defined in huaweicloud sdk"""
self._common_name = None
self._country = None
self._locality = None
self._organization = None
self._organizational_unit = None
self._state = None
self.discriminator = None
if common_name is not None:
self.common_name = common_name
if country is not None:
self.country = country
if locality is not None:
self.locality = locality
if organization is not None:
self.organization = organization
if organizational_unit is not None:
self.organizational_unit = organizational_unit
if state is not None:
self.state = state
@property
def common_name(self):
"""Gets the common_name of this DistinguishedName.
        Common name
:return: The common_name of this DistinguishedName.
:rtype: str
"""
return self._common_name
@common_name.setter
def common_name(self, common_name):
"""Sets the common_name of this DistinguishedName.
        Common name
:param common_name: The common_name of this DistinguishedName.
:type: str
"""
self._common_name = common_name
@property
def country(self):
"""Gets the country of this DistinguishedName.
        Country code
:return: The country of this DistinguishedName.
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this DistinguishedName.
        Country code
:param country: The country of this DistinguishedName.
:type: str
"""
self._country = country
@property
def locality(self):
"""Gets the locality of this DistinguishedName.
        Locality name
:return: The locality of this DistinguishedName.
:rtype: str
"""
return self._locality
@locality.setter
def locality(self, locality):
"""Sets the locality of this DistinguishedName.
        Locality name
:param locality: The locality of this DistinguishedName.
:type: str
"""
self._locality = locality
@property
def organization(self):
"""Gets the organization of this DistinguishedName.
        Organization name
:return: The organization of this DistinguishedName.
:rtype: str
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this DistinguishedName.
        Organization name
:param organization: The organization of this DistinguishedName.
:type: str
"""
self._organization = organization
@property
def organizational_unit(self):
"""Gets the organizational_unit of this DistinguishedName.
        Organizational unit name
:return: The organizational_unit of this DistinguishedName.
:rtype: str
"""
return self._organizational_unit
@organizational_unit.setter
def organizational_unit(self, organizational_unit):
"""Sets the organizational_unit of this DistinguishedName.
        Organizational unit name
:param organizational_unit: The organizational_unit of this DistinguishedName.
:type: str
"""
self._organizational_unit = organizational_unit
@property
def state(self):
"""Gets the state of this DistinguishedName.
        State/province
:return: The state of this DistinguishedName.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this DistinguishedName.
        State/province
:param state: The state of this DistinguishedName.
:type: str
"""
self._state = state
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DistinguishedName):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
6ec74b00beccb475fbc8db105e3048f59664ccac | df541a802b2dfa89d3aab14af627358dc7c76e6e | /接口自动化/Frame_relevance/main.py | e79ef2e1e37151445218b5da1d27c4a22b306a77 | [] | no_license | gupan2018/PyAutomation | de966aff91f750c7207c9d3f3dfb488698492342 | 230aebe3eca5799c621673afb647d35a175c74f1 | refs/heads/master | 2021-09-07T19:44:20.710574 | 2017-12-22T15:58:23 | 2017-12-22T15:58:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | __author__ = 'Administrator'
'''
Run an automated, end-to-end request flow for the full business workflow: add project -> review -> register -> recharge -> invest
'''
from Frame_relevance.database import Database
from Frame_relevance.runcase import Runcase
from Frame_relevance.HttpRequest import HttpRequest
path_background = "db_background.conf"
path_test_data = "test_data.conf"
path_http = "http.conf"
path_mode = "mode.conf"
if __name__ == "__main__":
    # Get the backend database connection
cnn_background = Database(path_background).connect_db()
http = HttpRequest(path_http)
runcase = Runcase()
if runcase.run_Case(http,cnn_background, path_test_data) == False:
print("测试失败")
else:
print("测试成功") | [
"[email protected]"
] | |
4aaa44a645f108b0de973b6f7119085e4cfadb95 | 45da48ae0a87f4bb27409bfe2e947b29a2d4a0d0 | /znake/znake/test/test_tools.py | 314367afe4e3c68ddcd06ff272e50992230a2700 | [
"Apache-2.0"
] | permissive | per-bohlin/opensourcelib | 3923165982ae1b2c78602a3485684ded75c28c36 | e48427fd0b5d87ea21484e85d2575c8b8879b9a3 | refs/heads/master | 2020-05-21T21:34:15.112527 | 2019-05-11T16:57:58 | 2019-05-11T16:57:58 | 186,156,987 | 0 | 0 | NOASSERTION | 2019-05-11T16:34:39 | 2019-05-11T16:34:39 | null | UTF-8 | Python | false | false | 1,495 | py | from unittest import TestCase
from unittest.mock import Mock
from znake.tools import _render_isort, _render_yapf, render_flake8_check, render_pydocstyle_check
class TestToolsRenderCommandLine(TestCase):
@staticmethod
def get_ctx(tool):
ctx = Mock()
ctx.znake.static.packages = ['my_package', 'my_other_package']
getattr(ctx.znake.static, tool).flags = ['--my-flag', '--my-other-flag']
return ctx
def test_flake8(self):
ctx = self.get_ctx('flake8')
result = render_flake8_check(ctx)
assert 'flake8' in result
assert '--my-flag --my-other-flag' in result
assert 'my_package my_other_package' in result
def test_isort(self):
ctx = self.get_ctx('isort')
result = _render_isort(ctx, '--EXTRA')
assert 'isort --recursive --EXTRA' in result
assert '--my-flag --my-other-flag' in result
assert 'my_package my_other_package' in result
def test_pydocstyle(self):
ctx = self.get_ctx('pydocstyle')
result = render_pydocstyle_check(ctx)
assert 'pydocstyle' in result
assert '--my-flag --my-other-flag' in result
assert 'my_package my_other_package' in result
def test_yapf(self):
ctx = self.get_ctx('yapf')
result = _render_yapf(ctx, '--EXTRA')
assert 'yapf -p --recursive --EXTRA' in result
assert '--my-flag --my-other-flag' in result
assert 'my_package my_other_package' in result
| [
"[email protected]"
] | |
a704c7629480555065e3767614ff66caab4f1096 | bacabd549ca67204bd3ec22f0f9020a4287aa6c5 | /ui/dispatcher_tab.py | 87144b13fd78c80105599462550a5804ad38e78a | [] | no_license | gladiopeace/csc-manager-ui | 5b8b642695742e906c779bbb18759084ed4791a9 | ec7660b91aed0f8512183b147cb49994c925bc41 | refs/heads/master | 2023-06-26T17:22:31.434146 | 2021-07-23T13:32:33 | 2021-07-23T13:32:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | import logging
import tkinter
class DispatcherTab(tkinter.Frame):
def __init__(self, parent, in_callback=None, out_callback=None):
self.logger = logging.getLogger(f'{self.__class__.__name__}', )
self.parent = parent
self.in_callback = in_callback
self.out_callback = out_callback
tkinter.Frame.__init__(self, parent)
def on_visible_in(self, parent, id, str):
self.logger.debug("on_visible_in: parent=%s, ID=%s, str=%s", parent, id, str)
if self.in_callback is not None:
self.in_callback()
def on_visible_out(self, parent, id, str):
self.logger.debug("on_visible_in: parent=%s, ID=%s, str=%s", parent, id, str)
if self.out_callback is not None:
self.out_callback()
| [
"[email protected]"
] | |
fa9a485adbdcf00e88ce9e816a00db9e0b6e9d2a | bdb1c323968cd9d5441a187a29ed7e25a2e4f07e | /cp0/people/management/commands/UPdate.py | e42d81d517445ea10d217cff37e1280e3c12f9e1 | [] | no_license | liangzhaowang/automation_system | beee351dd9f09a51e2b81617ac5bee63023ea9b8 | f77ef433c2366253dc9d9fdb7c54911cb38ed3e8 | refs/heads/master | 2022-02-19T11:07:44.047000 | 2019-09-23T02:16:00 | 2019-09-23T02:16:00 | 209,732,359 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,650 | py | from django.core.management.base import BaseCommand
import json
import time
from people.models import bxtp_m
from people.models import bxtp_o
from Patches import Patches
from people.models import bkc_m
from people.models import bkc_o
from data.models import Task
class Command(BaseCommand):
def handle(self,*args, **options):
while True:
options_time = time.strftime("%Y-%m-%d %H:%M:%S")
print "======================================="
print "Now time is: {0}".format(str(options_time))
self.checkdata()
print "Please wait 30 seconds for next update "
print "======================================="
time.sleep(30)
def checkdata(self):
Bkc_m = bkc_m.objects.all()
Bkc_o = bkc_o.objects.all()
for i in Bkc_m:
id = i.id
if i.eb =='':
tasks_id = i.task
if tasks_id:
tasks = Task.objects.get(id=tasks_id)
config_id = str(tasks.test_config)
if (json.load(open("./data/data/test_configs/" + config_id)).has_key('base_build')):
new_eb = "https://buildbot.sh.intel.com/absp/builders/bxtp_ivi_m-engineering/builds/"+json.load(open("./data/data/test_configs/" + config_id))['build'][0]
print "update eblink({0}).subject to {1}".format(str(i.eb), new_eb)
bkc_m.objects.filter(id=id).update(eb=new_eb)
for i in Bkc_o:
id = i.id
if i.eb =='':
tasks_id = i.task
if tasks_id:
tasks = Task.objects.get(id=tasks_id)
config_id = str(tasks.test_config)
if (json.load(open("./data/data/test_configs/" + config_id)).has_key('base_build')):
new_eb = "https://buildbot.sh.intel.com/absp/builders/master-engineering/builds/"+json.load(open("./data/data/test_configs/" + config_id))['build'][0]
print "update eblink({0}).subject to {1}".format(str(i.eb), new_eb)
bkc_o.objects.filter(id=id).update(eb=new_eb)
all = bxtp_m.objects.all()
for i in all:
id = i.id
data = i.patch.split("/")[5]
p = Patches(data)
if len(p.content):
owner = str(p.owner)
subject = str(p.subject)
status = str(p.status)
track = str(p.track_id)
if i.owner != owner:
print "update patch({0}).owner to {1}".format(str(i.id),owner)
bxtp_m.objects.filter(id=id).update(owner=owner)
if i.subject != subject:
print "update patch({0}).subject to {1}".format(str(i.id),subject)
bxtp_m.objects.filter(id=id).update(subject=subject)
if i.status != status:
print "update patch({0}).status to {1}".format(str(i.id),status)
bxtp_m.objects.filter(id=id).update(status=status)
if i.track != track:
print "update patch({0}).track to {1}".format(str(i.id),track)
bxtp_m.objects.filter(id=id).update(track=track)
else:
print 'Patch_M(%d) error' % id
alls = bxtp_o.objects.all()
for i in alls:
id = i.id
data = i.patch.split("/")[5]
p = Patches(data)
if len(p.content):
owner = str(p.owner)
subject = str(p.subject)
status = str(p.status)
track = str(p.track_id)
if i.owner != owner:
print "update patch({0}).owner to {1}".format(str(i.id),owner)
bxtp_o.objects.filter(id=id).update(owner=owner)
if i.subject != subject:
print "update patch({0}).subject to {1}".format(str(i.id),subject)
bxtp_o.objects.filter(id=id).update(subject=subject)
if i.status != status:
print "update patch({0}).status to {1}".format(str(i.id),status)
bxtp_o.objects.filter(id=id).update(status=status)
if i.track != track:
print "update patch({0}).track to {1}".format(str(i.id),track)
bxtp_o.objects.filter(id=id).update(track=track)
else:
print 'Patch_O(%d) error' % id | [
"[email protected]"
] | |
58204fab93085a4af72d6ba4ffef814213fd27a0 | a9243f735f6bb113b18aa939898a97725c358a6d | /0.14/_downloads/plot_decoding_time_generalization_conditions.py | 98b29ed251d7f7287579cc2c0e3049cdb118be34 | [] | permissive | massich/mne-tools.github.io | 9eaf5edccb4c35831400b03278bb8c2321774ef2 | 95650593ba0eca4ff8257ebcbdf05731038d8d4e | refs/heads/master | 2020-04-07T08:55:46.850530 | 2019-09-24T12:26:02 | 2019-09-24T12:26:02 | 158,233,630 | 0 | 0 | BSD-3-Clause | 2018-11-19T14:06:16 | 2018-11-19T14:06:16 | null | UTF-8 | Python | false | false | 2,991 | py | """
=========================================================================
Decoding sensor space data with generalization across time and conditions
=========================================================================
This example runs the analysis described in [1]_. It illustrates how one can
fit a linear classifier to identify a discriminatory topography at a given time
instant and subsequently assess whether this linear model can accurately
predict all of the time samples of a second set of conditions.
References
----------
.. [1] King & Dehaene (2014) 'Characterizing the dynamics of mental
representations: the temporal generalization method', Trends In
Cognitive Sciences, 18(4), 203-210. doi: 10.1016/j.tics.2014.01.002.
"""
# Authors: Jean-Remi King <[email protected]>
# Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.decoding import GeneralizationAcrossTime
print(__doc__)
# Preprocess data
data_path = sample.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
events_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
picks = mne.pick_types(raw.info, meg=True, exclude='bads') # Pick MEG channels
raw.filter(1, 30, method='fft') # Band pass filtering signals
events = mne.read_events(events_fname)
event_id = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
decim = 2 # decimate to make the example faster to run
epochs = mne.Epochs(raw, events, event_id, -0.050, 0.400, proj=True,
picks=picks, baseline=None, preload=True,
reject=dict(mag=5e-12), decim=decim, verbose=False)
# We will train the classifier on all left visual vs auditory trials
# and test on all right visual vs auditory trials.
# In this case, because the test data is independent from the train data,
# we test the classifier of each fold and average the respective predictions.
# Define events of interest
triggers = epochs.events[:, 2]
viz_vs_auditory = np.in1d(triggers, (1, 2)).astype(int)
gat = GeneralizationAcrossTime(predict_mode='mean-prediction', n_jobs=1)
# For our left events, which ones are visual?
viz_vs_auditory_l = (triggers[np.in1d(triggers, (1, 3))] == 3).astype(int)
# To make scikit-learn happy, we converted the bool array to integers
# in the same line. This results in an array of zeros and ones:
print("The unique classes' labels are: %s" % np.unique(viz_vs_auditory_l))
gat.fit(epochs[('AudL', 'VisL')], y=viz_vs_auditory_l)
# For our right events, which ones are visual?
viz_vs_auditory_r = (triggers[np.in1d(triggers, (2, 4))] == 4).astype(int)
gat.score(epochs[('AudR', 'VisR')], y=viz_vs_auditory_r)
gat.plot(title="Temporal Generalization (visual vs auditory): left to right")
| [
"[email protected]"
] | |
05c2a6863ff170102ac029bc54b72165cc024208 | f66b8a4d5d2f9f9faeb23a2bbbf6524ec49b2051 | /surreal/__init__.py | 2d52593edffe3efb913070aa7a4219fbe7fb3203 | [
"Apache-2.0"
] | permissive | ducandu/surreal | 237f4188ba270bab7495cb782ed10ee463fe78a7 | 8abfb18538340d50146c9c44f5ecb8a1e7d89ac3 | refs/heads/master | 2020-08-01T14:10:00.327798 | 2019-11-09T15:32:13 | 2019-11-09T15:32:13 | 211,018,247 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,532 | py | # Copyright 2019 ducandu GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from surreal.version import __version__
from surreal.makeable import Makeable
from surreal.config import Config
if "SURREAL_HOME" in os.environ:
SURREAL_HOME = os.environ.get("SURREAL_HOME")
else:
SURREAL_HOME = os.path.expanduser('~')
SURREAL_HOME = os.path.join(SURREAL_HOME, ".surreal/")
PATH_EPISODE_LOGS = SURREAL_HOME + "episodes/"
PATH_PREPROCESSING_LOGS = SURREAL_HOME + "preprocessing/"
PATH_SUMMARIES = SURREAL_HOME + "summaries/"
# Create dirs if necessary:
for dir in [SURREAL_HOME, PATH_EPISODE_LOGS, PATH_PREPROCESSING_LOGS, PATH_SUMMARIES]:
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError:
pass
__all__ = ["__version__", "Config", "Makeable",
"SURREAL_HOME", "PATH_EPISODE_LOGS", "PATH_PREPROCESSING_LOGS", "PATH_SUMMARIES"
]
| [
"[email protected]"
] | |
0adbdd2bd8d43634f5c96ccc4a3c8740c82a3216 | de4da7c45581f72adaf8e328a89cb3d57fe3613f | /appengine/olamundo/sorteio.py | a6f9e22632289da4d7a3116e0e402b2005a1840c | [] | no_license | ramalho/propython | 2469be7492554762d05f9b0ce5c0dc3a51bd3a18 | 76c2b52755e08d49929cdc2a523db72735240e72 | refs/heads/master | 2022-06-01T22:51:07.659074 | 2022-05-22T18:22:21 | 2022-05-22T18:22:21 | 140,458 | 39 | 13 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | # coding: utf-8
from random import shuffle
def parear(nomes, embaralhar=True):
    u'''Given a list of names, generate a list of ordered pairs of
    names, where:
    - each name appears once and only once in each position
    - if a pair a, b exists, the pair b, a will not exist
    Examples:
    >>> parear(['a', 'b'], embaralhar=False)
    [('a', 'b'), ('b', 'a')]
    >>> parear(['a', 'b', 'c'], embaralhar=False)
    [('a', 'b'), ('b', 'c'), ('c', 'a')]
    '''
    if embaralhar:
        shuffle(nomes)  # random.shuffle shuffles in place and returns None
    primeiro = nomes[0]
    pares = []
    try:
        while True:
            nome = nomes.pop(0)
            pares.append((nome, nomes[0]))
    except IndexError:
        # the last remaining name pairs back with the first one
        pares.append((nome, primeiro))
    return pares
if __name__=='__main__':
from doctest import testmod
testmod()
| [
"[email protected]"
] | |
c79903a6216c94eb3633b54c28d4bdfc5e67a99e | fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd | /tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/pass_through.py | cc1d267d32fd0971958e46cfd6e27da2af5628b2 | [
"LGPL-2.0-or-later",
"BSD-3-Clause",
"Apache-2.0",
"LGPL-2.0-only",
"MIT",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only"
] | permissive | wzyy2/chromium-browser | 2644b0daf58f8b3caee8a6c09a2b448b2dfe059c | eb905f00a0f7e141e8d6c89be8fb26192a88c4b7 | refs/heads/master | 2022-11-23T20:25:08.120045 | 2018-01-16T06:41:26 | 2018-01-16T06:41:26 | 117,618,467 | 3 | 2 | BSD-3-Clause | 2022-11-20T22:03:57 | 2018-01-16T02:09:10 | null | UTF-8 | Python | false | false | 1,318 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common.chrome_proxy_shared_page_state import ChromeProxySharedPageState
from telemetry.page import page as page_module
from telemetry import story
class PassThroughPage(page_module.Page):
"""
A test page for the chrome proxy pass-through tests.
"""
def __init__(self, url, page_set):
super(PassThroughPage, self).__init__(url=url, page_set=page_set,
shared_page_state_class=ChromeProxySharedPageState)
def RunNavigateSteps(self, action_runner):
super(PassThroughPage, self).RunNavigateSteps(action_runner)
action_runner.ExecuteJavaScript('''
(function() {
var request = new XMLHttpRequest();
request.open("GET", {{ url }});
request.setRequestHeader("Chrome-Proxy-Accept-Transform", "identity");
request.send(null);
})();''', url=self.url)
action_runner.Wait(1)
class PassThroughStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(PassThroughStorySet, self).__init__()
urls_list = [
'http://check.googlezip.net/image.png',
]
for url in urls_list:
self.AddStory(PassThroughPage(url, self))
| [
"[email protected]"
] | |
e056bd156b04aa2a41d8cfda5e58af59dbed6b8c | 4c514345b4759ed4d17f48565ae66dbd7313a0e8 | /database/match_query.py | 5e7c6e4ee818c1e183fa2b609979a449e1133e02 | [
"MIT"
] | permissive | csteward24/the-blue-alliance | a6f193176b5c3f3eadb73126d14d06ce299c4185 | cb3c5ce9078983306e6c83067ae62f5848ffe290 | refs/heads/master | 2020-12-11T05:45:15.029275 | 2016-03-11T19:49:32 | 2016-03-11T19:49:32 | 53,530,477 | 1 | 0 | null | 2016-03-09T20:45:46 | 2016-03-09T20:45:46 | null | UTF-8 | Python | false | false | 1,616 | py | from google.appengine.ext import ndb
from database.database_query import DatabaseQuery
from models.event import Event
from models.match import Match
class EventMatchesQuery(DatabaseQuery):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = 'event_matches_{}' # (event_key)
def __init__(self, event_key):
self._query_args = (event_key, )
@ndb.tasklet
def _query_async(self):
event_key = self._query_args[0]
matches = yield Match.query(Match.event == ndb.Key(Event, event_key)).fetch_async()
raise ndb.Return(matches)
class TeamEventMatchesQuery(DatabaseQuery):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = 'team_event_matches_{}_{}' # (team_key, event_key)
def __init__(self, team_key, event_key):
self._query_args = (team_key, event_key, )
@ndb.tasklet
def _query_async(self):
team_key = self._query_args[0]
event_key = self._query_args[1]
matches = yield Match.query(
Match.team_key_names == team_key,
Match.event == ndb.Key(Event, event_key)).fetch_async()
raise ndb.Return(matches)
class TeamYearMatchesQuery(DatabaseQuery):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = 'team_year_matches_{}_{}' # (team_key, year)
def __init__(self, team_key, year):
self._query_args = (team_key, year, )
@ndb.tasklet
def _query_async(self):
team_key = self._query_args[0]
year = self._query_args[1]
matches = yield Match.query(
Match.team_key_names == team_key,
Match.year == year).fetch_async()
raise ndb.Return(matches)
| [
"[email protected]"
] | |
d1bcad81775afd0fd7a676303cdc6244b674c149 | ddf7d8f996a0cf66b0e083e0557305b3be4619e5 | /AByteOfPython/Chapter_14/src/example02.py | 42b9837fde2feafcde3ea51705f6a6d1b6fdfbb7 | [] | no_license | archerImagine/myNewJunk | 8fab3e6ada03eee3aebb5c712d50bcfb38bf48b0 | 42fff352f6057f84ab8c81f1debc149881c1e49f | refs/heads/master | 2020-06-16T12:22:30.590672 | 2016-11-29T17:07:23 | 2016-11-29T17:07:23 | 75,103,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | s = raw_input("Enter Something -----> ") | [
"[email protected]"
] | |
47489d28c2824bf881262a0cb690632b0f06a466 | 6eb56f2e3f14f2373be07fe95b1c6fedf1e2d49f | /month04/Django/my_django_test/my_test_site/my_test_site/wsgi.py | 6f99386c8bf33243c58777fcefe3b43edecaa629 | [
"Apache-2.0"
] | permissive | chaofan-zheng/python_leanring_code | fe22b0370cadebf7456477269aff4a35cef0eb41 | 0af44ff39b9ded2c1d2cc96c6d356d21170ac04d | refs/heads/main | 2023-02-28T07:56:46.457552 | 2021-02-10T15:08:33 | 2021-02-10T15:08:33 | 323,584,115 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for my_test_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_test_site.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
d70c588617cd936f303f45bd05a8f14dd95db981 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_05_02_preview/operations/_trusted_access_roles_operations.py | 63b63ca700ec5026c217f4203ddfdc330e525f96 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 7,294 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-05-02-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/trustedAccessRoles",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"location": _SERIALIZER.url("location", location, "str", min_length=1),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class TrustedAccessRolesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2022_05_02_preview.ContainerServiceClient`'s
:attr:`trusted_access_roles` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, location: str, **kwargs: Any) -> Iterable["_models.TrustedAccessRole"]:
"""List supported trusted access roles.
List supported trusted access roles.
:param location: The name of Azure region. Required.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TrustedAccessRole or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_05_02_preview.models.TrustedAccessRole]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-05-02-preview"))
cls: ClsType[_models.TrustedAccessRoleListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("TrustedAccessRoleListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/locations/{location}/trustedAccessRoles"
}
| [
"[email protected]"
] | |
52e6250f9fd41a90fb895ac16801170303863407 | 6be29c75fe23bf38ac2df4125242e767fb37d41c | /tests/parsers/sqlite_plugins/interface.py | c0f3af0d7986a8e2dc78f12799da84559dca1a96 | [
"Apache-2.0"
] | permissive | Laxman-SM/plaso | 579c7954b2622368427740e2b5687bf2efe249e7 | bec7b974ec9c2967be58fc704afca936591e46d3 | refs/heads/master | 2021-01-22T05:32:59.383909 | 2017-05-26T04:15:29 | 2017-05-26T04:15:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,497 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the SQLite plugin interface."""
import sys
import unittest
from plaso.containers import time_events
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
from tests import test_lib as shared_test_lib
from tests.parsers.sqlite_plugins import test_lib
class TestSQLitePlugin(interface.SQLitePlugin):
"""Convenience class for a test SQLite plugin."""
NAME = u'test'
QUERIES = [(
u'SELECT Field1, Field2, Field3 FROM MyTable', u'ParseMyTableRow')]
REQUIRED_TABLES = frozenset([u'MyTable'])
SCHEMAS = [
{u'MyTable':
u'CREATE TABLE "MyTable" ( `Field1` TEXT, `Field2` INTEGER, '
u'`Field3` BLOB )'}]
def __init__(self):
"""Initializes SQLite plugin."""
super(TestSQLitePlugin, self).__init__()
self.results = []
def ParseMyTableRow(self, parser_mediator, row, **unused_kwargs):
"""Parses a MyTable row.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
"""
file_entry = parser_mediator.GetFileEntry()
path_spec = file_entry.path_spec
location = path_spec.location
from_wal = location.endswith(u'-wal')
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
# Also, Field3 needs to be converted to a string if Python 2 is used
# because it is a read-write buffer.
field3 = row['Field3']
if sys.version_info[0] < 3:
field3 = str(field3)
self.results.append(((row['Field1'], row['Field2'], field3), from_wal))
event = time_events.TimestampEvent(
timelib.Timestamp.NONE_TIMESTAMP, eventdata.EventTimestamp.NOT_A_TIME,
data_type=u'fake')
event.field1 = row['Field1']
event.field2 = row['Field2']
event.field3 = field3
event.from_wal = location.endswith(u'-wal')
parser_mediator.ProduceEvent(event)
class SQLiteInterfaceTest(test_lib.SQLitePluginTestCase):
"""Tests for the SQLite plugin interface."""
@shared_test_lib.skipUnlessHasTestFile([u'wal_database.db'])
@shared_test_lib.skipUnlessHasTestFile([u'wal_database.db-wal'])
def testProcessWithWAL(self):
"""Tests the Process function on a database with WAL file."""
plugin_object = TestSQLitePlugin()
cache = sqlite.SQLiteCache()
wal_file = self._GetTestFilePath([u'wal_database.db-wal'])
self._ParseDatabaseFileWithPlugin(
[u'wal_database.db'], plugin_object, cache=cache, wal_path=wal_file)
expected_results = [
((u'Committed Text 1', 1, b'None'), False),
((u'Committed Text 2', 2, b'None'), False),
((u'Deleted Text 1', 3, b'None'), False),
((u'Committed Text 3', 4, b'None'), False),
((u'Committed Text 4', 5, b'None'), False),
((u'Deleted Text 2', 6, b'None'), False),
((u'Committed Text 5', 7, b'None'), False),
((u'Committed Text 6', 8, b'None'), False),
((u'Committed Text 7', 9, b'None'), False),
((u'Unhashable Row 1', 10, b'Binary Text!\x01\x02\x03'), False),
((u'Modified Committed Text 3', 4, b'None'), True),
((u'Unhashable Row 2', 11, b'More Binary Text!\x01\x02\x03'), True),
((u'New Text 1', 12, b'None'), True),
((u'New Text 2', 13, b'None'), True)]
self.assertEqual(expected_results, plugin_object.results)
@shared_test_lib.skipUnlessHasTestFile([u'wal_database.db'])
def testProcessWithoutWAL(self):
"""Tests the Process function on a database without WAL file."""
plugin_object = TestSQLitePlugin()
cache = sqlite.SQLiteCache()
self._ParseDatabaseFileWithPlugin(
[u'wal_database.db'], plugin_object, cache=cache)
expected_results = [
((u'Committed Text 1', 1, b'None'), False),
((u'Committed Text 2', 2, b'None'), False),
((u'Deleted Text 1', 3, b'None'), False),
((u'Committed Text 3', 4, b'None'), False),
((u'Committed Text 4', 5, b'None'), False),
((u'Deleted Text 2', 6, b'None'), False),
((u'Committed Text 5', 7, b'None'), False),
((u'Committed Text 6', 8, b'None'), False),
((u'Committed Text 7', 9, b'None'), False),
((u'Unhashable Row 1', 10, b'Binary Text!\x01\x02\x03'), False)]
self.assertEqual(expected_results, plugin_object.results)
@shared_test_lib.skipUnlessHasTestFile([u'wal_database.db'])
@shared_test_lib.skipUnlessHasTestFile([u'wal_database.db-wal'])
def testSchemaMatching(self):
"""Tests the Schema matching capabilities."""
plugin_object = TestSQLitePlugin()
cache = sqlite.SQLiteCache()
# Test matching schema.
storage_writer = self._ParseDatabaseFileWithPlugin(
[u'wal_database.db'], plugin_object, cache=cache)
self.assertTrue(storage_writer.events)
for event in storage_writer.events:
self.assertTrue(event.schema_match)
# Test schema change with WAL.
wal_file = self._GetTestFilePath([u'wal_database.db-wal'])
storage_writer = self._ParseDatabaseFileWithPlugin(
[u'wal_database.db'], plugin_object, cache=cache, wal_path=wal_file)
self.assertTrue(storage_writer.events)
for event in storage_writer.events:
if event.from_wal:
self.assertFalse(event.schema_match)
else:
self.assertTrue(event.schema_match)
# Add schema change from WAL file and test again.
plugin_object.SCHEMAS.append(
{u'MyTable':
u'CREATE TABLE "MyTable" ( `Field1` TEXT, `Field2` INTEGER, `Field3` '
u'BLOB , NewField TEXT)',
u'NewTable':
u'CREATE TABLE NewTable(NewTableField1 TEXT, NewTableField2 TEXT)'})
storage_writer = self._ParseDatabaseFileWithPlugin(
[u'wal_database.db'], plugin_object, cache=cache, wal_path=wal_file)
self.assertTrue(storage_writer.events)
for event in storage_writer.events:
self.assertTrue(event.schema_match)
# Test without original schema.
del plugin_object.SCHEMAS[0]
storage_writer = self._ParseDatabaseFileWithPlugin(
[u'wal_database.db'], plugin_object, cache=cache, wal_path=wal_file)
self.assertTrue(storage_writer.events)
for event in storage_writer.events:
if event.from_wal:
self.assertTrue(event.schema_match)
else:
self.assertFalse(event.schema_match)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
8e057d2ce2408c7882a9090ddf70294e23500b13 | 65a32b8a8a97c126843d2cfe79c43193ac2abc23 | /chapter16/fib_test_2.py | c722f4d40ff1ba55c39a6f1db4cb394a86e1710c | [] | no_license | zhuyuedlut/advanced_programming | 9af2d6144e247168e492ddfb9af5d4a5667227c4 | a6e0456dd0b216b96829b5c3cef11df706525867 | refs/heads/master | 2023-03-19T09:21:31.234000 | 2020-10-09T13:09:38 | 2020-10-09T13:09:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | def memoize(f):
memo = {}
def helper(x):
if x not in memo:
memo[x] = f(x)
return memo[x]
return helper
@memoize
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
def fib_seq(n):
res = []
if n > 0:
res.extend(fib_seq(n-1))
res.append(fib(n))
return res
if __name__ == "__main__":
fib_seq(30)
import cProfile
cProfile.run('fib_seq(30)')
| [
"[email protected]"
] | |
efd270ebc5f0c2afb38079f40437d2b1cbb838fc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02711/s964630757.py | 7962511762d3bf1f82fb5eb8111eb4c6e5b884a7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | n = list(input())
if n.count("7") == 0:
print("No")
else:
print("Yes") | [
"[email protected]"
] | |
94f28c8e8785258f7272399beffdb289c6c802c0 | 035f7cbf8a16d2936b3df13c3966e954c6b6a13a | /lab/soc_module.py | e91f1b4bcb8ea1ab10d5cadb5d859c11f9bb6aed | [
"BSD-3-Clause"
] | permissive | 2i2c-org/utoronto-demo | 84d043b446f4c8ed5f5375175ac482deba8c2955 | 8e2cd4a9f04e3399bc2437e95975c80f5899cef1 | refs/heads/master | 2022-11-13T11:12:33.333581 | 2020-07-07T17:00:24 | 2020-07-07T17:00:24 | 277,604,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,475 | py | import pandas as pd
import os
import folium
import folium.plugins
import geojson
import random
import numpy as np
from sklearn import preprocessing
import re
from geopy.geocoders import Nominatim
import time
import certifi
import ssl
import geopy.geocoders
# ctx = ssl.create_default_context(cafile=certifi.where())
# geopy.geocoders.options.default_ssl_context = ctx
def html_popup(title, comment, imgpath, data):
"""Format the image data into html.
:params title, comment, imgpath, data: strings"""
html = """
<h3>TITLE</h3>
<img
src = IMGPATH
style="width:180px;height:128px;"
>
<p>
"COMMENT"
</p>
<p>
DATA
</p>
"""
html = html.replace(
"TITLE",
title).replace(
"COMMENT",
comment).replace(
"IMGPATH",
imgpath).replace(
"DATA",
data)
return html
def fix_tract(t):
"""Clean up census tract names.
:param t: Series of string tract names
:returns: Series of cleaned tract names
"""
if type(t) == str:
return t
return str(t).rstrip("0").rstrip(".")
def get_coords(data, alameda, user_agent):
"""Get the geographical coordinates (latitude and longitude) of a
list of street addresses.
:param data: DataFrame with student responses from Google form
:param alameda: GeoJSON data for Alameda county
:user_agent: string user agent for OpenStreetMap
:returns: "data" dataframe with appended column of coordinates
"""
tracts = folium.features.GeoJson(alameda)
tract_centroids = get_centroids(alameda)
data['Census Tract'] = data['Census Tract'].apply(fix_tract)
for j in np.arange(1, 6):
image_coords = []
for i, row in data.iterrows():
tract = row['Census Tract']
if not pd.isnull(row['Full Address of Block Face in Image #' + str(j) + ' (Street Number, Street Name, City, State, Zip Code). E.g.: 2128 Oxford Street, Berkeley, CA, 94704.']):
address = row['Full Address of Block Face in Image #' + str(j) + ' (Street Number, Street Name, City, State, Zip Code). E.g.: 2128 Oxford Street, Berkeley, CA, 94704.']
geocoder = Nominatim(user_agent=user_agent, timeout=3)
loc = geocoder.geocode(address)
if loc is None :
if len(tract) == 3:
tract += "0"
coords = tract_centroids[tract]
else:
coords = [loc.latitude, loc.longitude]
image_coords.append(coords)
elif not pd.isnull(row['Image #' + str(j)]):
image_coords.append(tract_centroids[tract])
else:
image_coords.append('NaN')
time.sleep(0.5)
data['Image #' + str(j)+ ' coordinates'] = image_coords
return data
def get_centroids(geojson):
"""Get census tract centroids.
:param geojson: a GeoJSON file with census tract location data
:returns: a dictionary with tract names mapped to coordinate tuples"""
tract_centroids = {}
for t in geojson['features']:
lat = t['properties']['intptlat10']
lon = t['properties']['intptlon10']
name = t['properties']['name10']
tract_centroids[name] = (float(lat), float(lon))
return tract_centroids
def map_data(myMap, alameda, obs_data):
"""Map student observations.
:param myMap: Folium Map object
:param alameda: GeoJSON of alameda county census tracts
:param obs_data: DataFrame image addresses and coordinates
:returns: Folium Map object with markers for student data
"""
# add tract outlines
tracts = folium.features.GeoJson(alameda)
tract_centroids = get_centroids(alameda)
myMap.add_child(tracts)
# transfer Table to pandas
obs_data = obs_data.to_df()
for t in list(set(set(obs_data['Census Tract']))):
subset = obs_data[obs_data['Census Tract'] == t]
markers = []
popups = []
for i, row in subset.iterrows():
for j in np.arange(1, 6):
if not pd.isnull(row['Image #' + str(j)]):
try:
image_url = row['Image #' + str(j)].replace(
"open?", "uc?export=download&")
except:
image_url = "NA"
coords = [float(coords) for coords in re.findall('-?[0-9]+.[0-9]+', row['Image #' + str(j) + ' coordinates'])]
# if there aren't coords of format [lat, lon] the loop skips this iteration
if len(coords) != 2:
continue
tract = str(row['Census Tract'])
comment = row["Other thoughts or comments for Image #" + str(j)]
if not isinstance(comment, str):
comment = "NA"
data = np.mean([row[i] for i in range(5, 14)
if type(row[i]) in [int, float]])
html = html_popup(
title="Tract: " + tract,
comment=comment,
imgpath=image_url,
data="")
popup = folium.Popup(
folium.IFrame(
html=html,
width=200,
height=300),
max_width=2650
)
markers += [coords]
popups += [popup]
marker_cluster = folium.plugins.MarkerCluster(locations=markers, popups=popups).add_to(myMap)
return myMap
def minmax_scale(x):
"""Scales values in array to range (0, 1)
:param x: array of values to scale
"""
if min(x) == max(x):
return x * 0
return (x - min(x)) / (max(x) - min(x))
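# Illustrative check (not part of the original module): a tiny assumed input showing
# the formula above in action.
def _demo_minmax_scale():
    scaled = minmax_scale(np.array([2.0, 4.0, 6.0]))
    # (x - min) / (max - min) maps the endpoints to 0 and 1 and the midpoint to 0.5
    assert np.allclose(scaled, [0.0, 0.5, 1.0])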
def scale_values(tbl, columns):
"""Scale values in a dataframe using MinMax scaling.
:param tbl: Table
:param columns: iterable with names of columns to be scaled
:returns: Table with scaled columns
"""
new_tbl = tbl.copy()
for col in columns:
name = new_tbl.labels[col]
x_scaled = minmax_scale(new_tbl[name])
new_tbl[name] = x_scaled
return new_tbl
# NO LONGER USED as of Fall 2018
def choropleth_overlay(mapa, column_name, joined, alameda):
"""Add a choropleth overlay to a map.
:param mapa: Folium Map object
:param column_name: string column name with data to overlay
:param joined:
:param alameda: GeoJSON Alameda county census tract data
:returns: mapa with a chloropleth overlay
"""
# add tract outlines
tracts = folium.features.GeoJson(alameda)
tract_centroids = get_centroids(alameda)
mapa.add_child(tracts)
threshold_scale = np.linspace(
joined[column_name].min(),
joined[column_name].max(),
6,
dtype=float).tolist()
mapa = folium.Map(location=(37.8044, -122.2711), zoom_start=11)
mapa.choropleth(geo_data=alameda,
data=joined,
columns=['Census Tract', column_name],
fill_color='YlOrRd',
key_on='feature.properties.name10',
threshold_scale=threshold_scale)
return mapa
| [
"[email protected]"
] | |
78c4520f26fc5a405e8b5516a71476aa9983b266 | 61f9c7094be028e040d1234f05ee6d7370c2206d | /pytext/models/decoders/mlp_decoder_n_tower.py | 6e2e0c6810eef0ce327e48063f8f785200ccca9b | [
"BSD-3-Clause"
] | permissive | timgates42/pytext | 3ce5473fecca5174108a4eb63209a3eecfb6d8dd | 5f2c3ca6c3ba56e1001e95825abd7ee295de1dff | refs/heads/main | 2023-03-15T07:33:21.217159 | 2022-07-11T16:06:16 | 2022-07-11T16:06:16 | 231,028,915 | 0 | 0 | NOASSERTION | 2019-12-31T05:04:01 | 2019-12-31T05:04:00 | null | UTF-8 | Python | false | false | 3,970 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import torch
import torch.nn as nn
from pytext.config.module_config import Activation
from pytext.models.decoders.decoder_base import DecoderBase
from pytext.optimizer import get_activation
from pytext.utils import precision
from pytext.utils.usage import log_class_usage
# Export types are now ints
# -1 represents ExportType.None
# eg: to export from tower 0, set your export type to 0
#
class MLPDecoderNTower(DecoderBase):
"""
Implements an 'n-tower' MLPDecoder
"""
class Config(DecoderBase.Config):
# Intermediate hidden dimensions
tower_specific_hidden_dims: List[List[int]] = []
hidden_dims: List[int] = []
layer_norm: bool = False
dropout: float = 0.0
activation: Activation = Activation.RELU
def __init__(
self,
config: Config,
tower_dims: List[int],
to_dim: int,
export_type=-1,
) -> None:
super().__init__(config)
for i in range(len(tower_dims)):
setattr(
self,
f"tower_mlp_{i}",
MLPDecoderNTower.get_mlp(
tower_dims[i],
0,
config.tower_specific_hidden_dims[i],
config.layer_norm,
config.dropout,
config.activation,
export_embedding=True,
),
)
from_dim = 0
for dims in config.tower_specific_hidden_dims:
from_dim += dims[-1]
self.mlp = MLPDecoderNTower.get_mlp(
from_dim,
to_dim,
config.hidden_dims,
config.layer_norm,
config.dropout,
config.activation,
)
        # keep the per-tower hidden dims so get_decoder() can enumerate the towers
        self.tower_specific_hidden_dims = config.tower_specific_hidden_dims
        self.out_dim = to_dim
        self.export_type = export_type
        log_class_usage(__class__)
@staticmethod
def get_mlp(
from_dim: int,
to_dim: int,
hidden_dims: List[int],
layer_norm: bool,
dropout: float,
activation: Activation,
export_embedding: bool = False,
):
layers = []
for i in range(len(hidden_dims)):
dim = hidden_dims[i]
layers.append(nn.Linear(from_dim, dim, True))
# Skip ReLU, LayerNorm, and dropout for the last layer if export_embedding
if not (export_embedding and i == len(hidden_dims) - 1):
layers.append(get_activation(activation))
if layer_norm:
layers.append(nn.LayerNorm(dim))
if dropout > 0:
layers.append(nn.Dropout(dropout))
from_dim = dim
if to_dim > 0:
layers.append(nn.Linear(from_dim, to_dim, True))
return nn.Sequential(*layers)
def forward(self, *x: List[torch.Tensor]) -> torch.Tensor:
# as per the associated model's arrange_model_inputs()
# first half of the list is the token inputs, the second half is the dense features
halfway = len(x) // 2
outputs = []
for i in range(halfway):
if self.export_type == i or self.export_type == -1:
tensor = (
torch.cat((x[i], x[halfway + i]), 1).half()
if precision.FP16_ENABLED
else torch.cat((x[i], x[halfway + i]), 1).float()
)
# len(tensor i) == i's encoder.embedding_dim + i's dense_dim
output = getattr(self, f"tower_mlp_{i}")(tensor)
outputs.append(output)
if self.export_type == i:
return output
return self.mlp(torch.cat(outputs, 1))
def get_decoder(self) -> List[nn.Module]:
return [
getattr(self, f"tower_mlp_{i}")
for i in range(len(self.tower_specific_hidden_dims))
]
| [
"[email protected]"
] | |
fc685360c6f088991a9f5e70892e4903738146a2 | 7daab7f2e91d62ba0383fa050f3dea1dc9752975 | /iniciante/1146_sequencias_crescentes.py | aa039d4e407f490f21aa250399b04ce5be63f343 | [] | no_license | luandadantas/URI-Python | 97ccdaa3835b2d2fa403f148969ca7e893d3f119 | 2cb67f39725b20e6fcbbeaf27d04c4ba05dba665 | refs/heads/master | 2022-12-04T02:51:14.374361 | 2020-08-14T17:59:58 | 2020-08-14T17:59:58 | 255,736,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | while True:
X = int(input())
if X == 0:
break
for i in range(1, X):
print(i,end=" ")
print(X) | [
"[email protected]"
] | |
3574d97da998084641fea6ef4eeadcf842506678 | 0682577346d5be6452f93e17cf06df70acb95135 | /src/bin2header.py | 4755e84c32cb3efc4ebf88c35ecbf0cdd727554f | [
"MIT"
] | permissive | cenit/bin2header | c8bf30cf371378a7f31c4eef37ffbe228fa41638 | 9860f292a0a109a1b999dd3cafe07fdb952a1e18 | refs/heads/dev/msvc | 2023-04-18T02:12:04.133748 | 2021-03-24T19:22:18 | 2021-03-24T20:59:12 | 340,431,411 | 0 | 0 | MIT | 2021-05-02T08:15:26 | 2021-02-19T16:47:07 | C++ | UTF-8 | Python | false | false | 3,545 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2019 Jordan Irwin (AntumDeluge) <[email protected]>
#
# This file is part of the bin2header project & is distributed under the
# terms of the MIT/X11 license. See: LICENSE.txt
import sys, os, array
if sys.version_info.major < 3:
print('\nERROR: Python ' + str(sys.version_info.major) + ' not supported. Please upgrade to Python 3.\n')
sys.exit(2)
__WIN32__ = 'windows' in os.getenv('OS').lower();
version = '0.1.2'
## Normalizes the path for they current system
def NormalizePath(path):
new_path = path
to_replace = '\\'
replace_with = '/'
if __WIN32__:
to_replace = '/'
replace_with = '\\'
new_path = new_path.replace(to_replace, replace_with)
if __WIN32__:
# MSYS2/MinGW paths
if new_path.lower().startswith('\\c\\'):
new_path = 'C:{}'.format(new_path[2:])
return new_path;
def GetBaseName(f):
base_name = os.path.basename(f)
# MSYS versions of Python appear to not understand Windows paths
if __WIN32__ and '\\' in base_name:
base_name = base_name.split('\\')[-1]
return base_name
def GetDirName(f):
dir_name = os.path.dirname(f)
# MSYS versions of Python appear to not understand Windows paths
if not dir_name and __WIN32__:
dir_name = '\\'.join(f.split('\\')[:-1])
return dir_name
def PrintUsage():
executable = os.path.basename(__file__)
print('\nbin2header version {} (Python)\nCopyright © 2019 Jordan Irwin <[email protected]>\n\n\tUsage:\t{} <file>\n'.format(version, executable))
def main(argv):
source_file = NormalizePath(argv[1])
# Check if file exists
if not os.path.isfile(source_file):
print('\nFile "{}" does not exist'.format(source_file))
PrintUsage()
sys.exit(1)
### Get filenames and target directory ###
filename = list(GetBaseName(source_file))
hname = list(filename)
target_dir = GetDirName(source_file)
### Remove Unwanted Characters ###
badchars = ('\\', '+', '-', '*', ' ')
for x in range(len(hname)):
if hname[x] in badchars or hname[x] == '.':
hname[x] = '_'
if filename[x] in badchars:
filename[x] = '_'
filename = ''.join(filename)
hname = ''.join(hname)
target_file = os.path.join(target_dir, filename) + '.h'
### Uppercase Name for Header ###
hname_upper = hname.upper()
hname_upper += '_H'
### Read Data In ###
data = array.array('B', open(source_file, 'rb').read())
### START Read Data Out to Header ###
# adds C++ std::vector support
# TODO: make optional
store_vector = True
# currently only support LF line endings output
outfile = open(target_file, 'w', newline='\n')
text = '#ifndef {0}\n#define {0}\n'.format(hname_upper)
if store_vector:
text += '\n#ifdef __cplusplus\n#include <vector>\n#endif\n'
text += '\nstatic const unsigned char {}[] = {{\n'.format(hname)
current = 0
data_length = len(data)
for byte in data:
if (current % 12) == 0:
text += ' '
text += '0x%02x' % byte
if (current + 1) < data_length:
text += ', '
if (current % 12) == 11:
text += '\n'
current += 1
text += '\n};\n'
if store_vector:
text += '\n#ifdef __cplusplus\nstatic const std::vector<char> ' \
+ hname + '_v(' + hname + ', ' + hname + ' + sizeof(' + hname \
+ '));\n#endif\n'
text +='\n#endif /* {} */\n'.format(hname_upper)
outfile.write(text)
outfile.close()
### END Read Data Out to Header ###
print('Exported to: {}'.format(target_file))
return 0
if __name__ == '__main__':
if len(sys.argv) < 2:
print('\nERROR: Missing <file> argument')
PrintUsage()
sys.exit(1)
main(sys.argv)
| [
"[email protected]"
] | |
021380e13d2eae318fc3807aa19a45be981051fb | e20ed90b9be7a0bcdc1603929d65b2375a224bf6 | /generated-libraries/python/netapp/lun/lun_stats_get_iter_key_td.py | b900171ae83a9786718d2f883283ee90bf322dc9 | [
"MIT"
] | permissive | radekg/netapp-ontap-lib-gen | 530ec3248cff5ead37dc2aa47ced300b7585361b | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | refs/heads/master | 2016-09-06T17:41:23.263133 | 2015-01-14T17:40:46 | 2015-01-14T17:40:46 | 29,256,898 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,238 | py | from netapp.netapp_object import NetAppObject
class LunStatsGetIterKeyTd(NetAppObject):
"""
Key typedef for table lunStats
"""
_key_3 = None
@property
def key_3(self):
"""
Field qtree
"""
return self._key_3
@key_3.setter
def key_3(self, val):
if val != None:
self.validate('key_3', val)
self._key_3 = val
_key_2 = None
@property
def key_2(self):
"""
Field volume
"""
return self._key_2
@key_2.setter
def key_2(self, val):
if val != None:
self.validate('key_2', val)
self._key_2 = val
_key_1 = None
@property
def key_1(self):
"""
Field path
"""
return self._key_1
@key_1.setter
def key_1(self, val):
if val != None:
self.validate('key_1', val)
self._key_1 = val
_key_0 = None
@property
def key_0(self):
"""
Field vserver
"""
return self._key_0
@key_0.setter
def key_0(self, val):
if val != None:
self.validate('key_0', val)
self._key_0 = val
_key_4 = None
@property
def key_4(self):
"""
Field lun
"""
return self._key_4
@key_4.setter
def key_4(self, val):
if val != None:
self.validate('key_4', val)
self._key_4 = val
@staticmethod
def get_api_name():
return "lun-stats-get-iter-key-td"
@staticmethod
def get_desired_attrs():
return [
'key-3',
'key-2',
'key-1',
'key-0',
'key-4',
]
def describe_properties(self):
return {
'key_3': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_2': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_1': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_0': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_4': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
| [
"[email protected]"
] | |
6080e8e27370523a1a15d482bd31e6589aa63cc2 | 1c5f4a13a5d67201b3a21c6e61392be2d9071f86 | /.VirtualEnv/Lib/site-packages/influxdb_client/domain/variable_assignment.py | f36ac8c683bd2b04ca15fcea617fca76518bfe9c | [] | no_license | ArmenFirman/FastAPI-InfluxDB | 19e3867c2ec5657a9428a05ca98818ca7fde5fd0 | b815509c89b5420f72abf514562e7f46dcd65436 | refs/heads/main | 2023-06-24T20:55:08.361089 | 2021-07-29T00:11:18 | 2021-07-29T00:11:18 | 390,462,832 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,256 | py | # coding: utf-8
"""
Influx OSS API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from influxdb_client.domain.statement import Statement
class VariableAssignment(Statement):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'id': 'Identifier',
'init': 'Expression'
}
attribute_map = {
'type': 'type',
'id': 'id',
'init': 'init'
}
def __init__(self, type=None, id=None, init=None): # noqa: E501,D401,D403
"""VariableAssignment - a model defined in OpenAPI.""" # noqa: E501
Statement.__init__(self) # noqa: E501
self._type = None
self._id = None
self._init = None
self.discriminator = None
if type is not None:
self.type = type
if id is not None:
self.id = id
if init is not None:
self.init = init
@property
def type(self):
"""Get the type of this VariableAssignment.
Type of AST node
:return: The type of this VariableAssignment.
:rtype: str
""" # noqa: E501
return self._type
@type.setter
def type(self, type):
"""Set the type of this VariableAssignment.
Type of AST node
:param type: The type of this VariableAssignment.
:type: str
""" # noqa: E501
self._type = type
@property
def id(self):
"""Get the id of this VariableAssignment.
:return: The id of this VariableAssignment.
:rtype: Identifier
""" # noqa: E501
return self._id
@id.setter
def id(self, id):
"""Set the id of this VariableAssignment.
:param id: The id of this VariableAssignment.
:type: Identifier
""" # noqa: E501
self._id = id
@property
def init(self):
"""Get the init of this VariableAssignment.
:return: The init of this VariableAssignment.
:rtype: Expression
""" # noqa: E501
return self._init
@init.setter
def init(self, init):
"""Set the init of this VariableAssignment.
:param init: The init of this VariableAssignment.
:type: Expression
""" # noqa: E501
self._init = init
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, VariableAssignment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
| [
"[email protected]"
] | |
5137a2d0697ff6d46bc41528064261cdc36a3fcc | 6a2a4f97009e31e53340f1b4408e775f3051e498 | /Iniciante/p2031.py | 89cb880da36fcb459e803f82f4d976c547bff06d | [] | no_license | rafacasa/OnlineJudgePythonCodes | 34c31f325cccb325f074492b40591ad880175816 | 030c18f9020898fdc4f672f9cc17723236e1271d | refs/heads/master | 2023-07-15T12:09:45.534873 | 2023-06-27T00:24:03 | 2023-06-27T00:24:03 | 250,595,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | qtd = int(input())
for i in range(qtd):
j1 = input()
j2 = input()
if j1 == 'ataque':
if j2 == 'ataque':
print('Aniquilacao mutua')
continue
else:
print('Jogador 1 venceu')
continue
if j1 == 'pedra':
if j2 == 'ataque':
print('Jogador 2 venceu')
continue
if j2 == 'pedra':
print('Sem ganhador')
continue
if j2 == 'papel':
print('Jogador 1 venceu')
continue
if j1 == 'papel':
if j2 == 'papel':
print('Ambos venceram')
continue
else:
print('Jogador 2 venceu')
continue
| [
"[email protected]"
] | |
5891a856beafb4fd1adc7c988adac6905e2e73fe | 45093e6470e866dede760bfb7a082bcbdb540adf | /venv/lib/python3.8/site-packages/rope/refactor/importutils/module_imports.py | 8bd5a186e6e17ee198ee03b1f534451212d6ad0f | [] | no_license | rwinfield/first-bot | 0dee7441d80abdd0d93c58b73518e373a8d0af18 | ff6b2628767d8a7e8ebc6115dbf3658429764490 | refs/heads/master | 2023-08-17T01:58:41.332059 | 2023-08-04T19:25:52 | 2023-08-04T19:25:52 | 151,766,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/d0/68/74/61b82b6d7beeb5ca031738ec3124885ff73d9ca7fecb4faa9c6cae466d | [
"[email protected]"
] | |
c945c8268924f58b87f20e45705848eca360c58a | 59a688e68421794af64bfe69a74f64b2c80cd79d | /graph_theory/utils_graph_theory.py | fd125946005681bf4598d211b4d0027d1ecc27c5 | [] | no_license | hearues-zueke-github/python_programs | f23469b306e057512aadecad0ca0a02705667a15 | d24f04ca143aa93f172210a4b9dfdd9bf1b79a15 | refs/heads/master | 2023-07-26T00:36:56.512635 | 2023-07-17T12:35:16 | 2023-07-17T12:35:16 | 117,093,746 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,290 | py | import numpy as np
# hamilton cycles
def get_cycles_of_1_directed_graph(l_edges_directed):
nodes_from, nodes_to = list(zip(*l_edges_directed))
all_nodes = sorted(set(nodes_from+nodes_to))
unique_nodes_from, counts = np.unique(nodes_from, return_counts=True)
assert np.all(counts==1)
edges_directed_dict = {n1: n2 for n1, n2 in l_edges_directed}
all_available_nodes = set(all_nodes)
list_of_cycles = []
while len(all_available_nodes) > 0:
node_now = all_available_nodes.pop()
lst_nodes = [node_now]
is_found_cycle = False
while True:
node_next = edges_directed_dict[node_now]
node_now = node_next
if not node_next in all_available_nodes:
if node_next in lst_nodes:
lst_nodes = lst_nodes[lst_nodes.index(node_next):]
argmin = np.argmin(lst_nodes)
lst_nodes = lst_nodes[argmin:]+lst_nodes[:argmin]
is_found_cycle = True
break
lst_nodes.append(node_next)
all_available_nodes.remove(node_next)
if is_found_cycle:
list_of_cycles.append(lst_nodes)
list_of_cycles_sorted = sorted(list_of_cycles, key=lambda x: (len(x), x))
return list_of_cycles_sorted
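# Illustrative usage sketch (not part of the original module); the edge list below is
# an assumption made for demonstration only.
def _demo_get_cycles():
    # every node has exactly one outgoing edge, forming two 2-cycles
    example_edges = [(0, 1), (1, 0), (2, 3), (3, 2)]
    # each cycle is rotated so its smallest node comes first, then the cycles are
    # sorted by (length, contents)
    assert get_cycles_of_1_directed_graph(example_edges) == [[0, 1], [2, 3]]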
def write_digraph_as_dotfile(path, arr_x, arr_y):
with open(path, 'w') as f:
f.write('digraph {\n')
for x in arr_x:
f.write(f' x{x}[label="{x}"];\n')
f.write('\n')
for x, y in zip(arr_x, arr_y):
f.write(f' x{x} -> x{y};\n')
f.write('}\n')
# d_node_pair_edge = {(0, 1): 2, ...}
# Node 0 to Node 1 with the Edge 2, etc.
# def write_many_digraph_as_dotfile(path, node_from, node_to):
def write_many_digraph_edges_as_dotfile(path, d_node_pair_edge):
with open(path, 'w') as f:
f.write('digraph {\n')
for x in sorted(set(list(map(lambda x: x[0], d_node_pair_edge.keys())))):
f.write(f' x{x}[label="{x}"];\n')
f.write('\n')
for (n1, n2), e in d_node_pair_edge.items():
# for x, y in zip(node_from, node_to):
f.write(f' x{n1} -> x{n2} [label="{e}"];\n')
# f.write(f' x{x} -> x{y} [label="{e}"];\n')
f.write('}\n')
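# Illustrative usage sketch (not part of the original module); 'example.dot' is a
# placeholder output path chosen for demonstration.
def _demo_write_dot():
    # declares nodes x0 and x1, then writes the labelled edges
    #   x0 -> x1 [label="2"];
    #   x1 -> x0 [label="3"];
    write_many_digraph_edges_as_dotfile('example.dot', {(0, 1): 2, (1, 0): 3})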
| [
"[email protected]"
] | |
61987d03b4832d555efd81438653636012699b92 | 4da9c19d9839c670fda30a45a7e223da624eee4a | /Codechef Problem solutions/lapindromes.py | 9070865b0cf0bb49ceaf98a28dcc869c6166e819 | [] | no_license | JineshKamdar98/Codchef-Problem-Solutions | 3e1737669cc0657ccc224e06f800b587130f5787 | 4447679aa3fb45a2d57f93bf3f724f6223049506 | refs/heads/master | 2020-05-05T06:38:10.306619 | 2019-04-06T06:16:10 | 2019-04-06T06:16:10 | 179,795,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | t=int(input())
while(t!=0):
s=input()
p=list(s[:len(s)//2])
q=list(s[-(len(s)//2):])
if(sorted(p)==sorted(q)):
print('YES')
else:
print('NO')
t-=1
| [
"[email protected]"
] | |
111dbc26616d818e6e15afdd77e8e66d50541599 | 4bd5e9b67d98bfcc9611bd8b774c9ab9f4f4d446 | /Python基础笔记/12/代码/1.类的定义.py | 611f8b67880c998961906ccb1093d893e099e0c3 | [] | no_license | zhenguo96/test1 | fe21510aea7feb674e52fd7a86d4177666f841c5 | 0d8de7e73e7e635d26462a0bc53c773d999498be | refs/heads/master | 2020-05-03T13:09:53.592103 | 2019-04-06T07:08:47 | 2019-04-06T07:08:47 | 178,646,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py | """
Class: Person
Attributes (static features):
    name
    gender
    age
Behaviours (dynamic features):
    eat
    sleep
    play with Doudou
"""
"""
1. Class naming rules:
    made up of digits, letters and underscores, and must not start with a digit
    must not be a reserved word
    names are case sensitive
2. Naming style: upper camel case, i.e. capitalize the first letter of every word
3. Class body: starts after the colon and must be indented
"""
class Person:
    # constructor
    """
    Difference between a method and a function: a method's first parameter must be self, a function has no self.
    A method is defined inside a class, its scope belongs to the class, and it may share a name with a function.
    A method must be called through an object: object.method()
    """
    def __init__(self):
        # instance attributes are defined in the constructor
        # self.attribute_name = attribute_value
        self.name = 'me'
        self.age = 5
        self.sex = 'male'
    # behaviours:
    def eat(self):
        print("eating")
    def sleeping(self):
        print("sleeping")
    def da_doudou(self):
        print("playing with Doudou")
# instantiate an object: ClassName()
doudou = Person()
print(doudou, type(doudou))
# access an attribute: object.attribute
print(doudou.name, doudou.age)
# call a method: object.method()
doudou.eat()
doudou.sleeping()
doudou.da_doudou()
| [
"[email protected]"
] | |
bfdc184ec6e550e1526742ddaa73ce54cdbabee9 | b509b4c3ba811ee5cbbc8ae5a580c78dc66c3437 | /backend/vehicle/migrations/0001_initial.py | d442d5d68a3d15410380299dcfa2ac14cffe213f | [] | no_license | crowdbotics-apps/application-24933 | d0a35800eee010daebae321e321e9f906cbc2e4a | dadd878c63f6d74f4f57d9a04eec818d77ba3595 | refs/heads/master | 2023-03-13T22:34:01.776842 | 2021-03-09T16:02:32 | 2021-03-09T16:02:32 | 346,061,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | # Generated by Django 2.2.19 on 2021-03-09 16:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('taxi_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='VehicleType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('icon', models.URLField()),
('base_rate', models.FloatField()),
],
),
migrations.CreateModel(
name='Vehicle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type_description', models.CharField(max_length=255)),
('plate_number', models.CharField(max_length=10)),
('timestamp_registered', models.DateTimeField(auto_now_add=True)),
('is_on_duty', models.BooleanField(blank=True, null=True)),
('driver', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='vehicle_driver', to='taxi_profile.DriverProfile')),
('vehicle_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='vehicle_vehicle_type', to='vehicle.VehicleType')),
],
),
]
| [
"[email protected]"
] | |
fbe04451994e0024a6b2f42914705abc22316a48 | d88360329b36f9c9fd7ee7efb118d45f7dc44f5e | /backend/api/apps.py | c33832e843d3833d7b2d6cddd0f32158ebcdc0e6 | [] | no_license | hyunmin0317/Study-App | 265a19723010b3150eac41fbaea7aa6f229e6140 | 32835258ec6ce0a981f2a359776e944b52adde81 | refs/heads/master | 2023-07-17T23:36:55.645573 | 2021-09-08T06:33:54 | 2021-09-08T06:33:54 | 398,130,964 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | from django.apps import AppConfig
class ApiConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'api'
| [
"[email protected]"
] | |
bb19442ce68445b7d0832bfe6249aa389dff37a8 | a8123a86db99b9365b10ba76dd509d58caa7bc10 | /python/practice/start_again/2023/07252023/valid_sudoku.py | bace8ea77bfaad4e5b056c4e5d44463b4ad85bc9 | [] | no_license | smohapatra1/scripting | c0404081da8a10e92e7c7baa8b540acc16540e77 | 3628c9109204ad98231ae8ee92b6bfa6b27e93cd | refs/heads/master | 2023-08-22T20:49:50.156979 | 2023-08-22T20:43:03 | 2023-08-22T20:43:03 | 147,619,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,506 | py | # 36. Valid Sudoku
# Determine if a 9 x 9 Sudoku board is valid. Only the filled cells need to be validated according to the following rules:
# Each row must contain the digits 1-9 without repetition.
# Each column must contain the digits 1-9 without repetition.
# Each of the nine 3 x 3 sub-boxes of the grid must contain the digits 1-9 without repetition.
# Note:
# A Sudoku board (partially filled) could be valid but is not necessarily solvable.
# Only the filled cells need to be validated according to the mentioned rules.
# Example 1:
# Input: board =
# [["5","3",".",".","7",".",".",".","."]
# ,["6",".",".","1","9","5",".",".","."]
# ,[".","9","8",".",".",".",".","6","."]
# ,["8",".",".",".","6",".",".",".","3"]
# ,["4",".",".","8",".","3",".",".","1"]
# ,["7",".",".",".","2",".",".",".","6"]
# ,[".","6",".",".",".",".","2","8","."]
# ,[".",".",".","4","1","9",".",".","5"]
# ,[".",".",".",".","8",".",".","7","9"]]
# Output: true
# Example 2:
# Input: board =
# [["8","3",".",".","7",".",".",".","."]
# ,["6",".",".","1","9","5",".",".","."]
# ,[".","9","8",".",".",".",".","6","."]
# ,["8",".",".",".","6",".",".",".","3"]
# ,["4",".",".","8",".","3",".",".","1"]
# ,["7",".",".",".","2",".",".",".","6"]
# ,[".","6",".",".",".",".","2","8","."]
# ,[".",".",".","4","1","9",".",".","5"]
# ,[".",".",".",".","8",".",".","7","9"]]
# Output: false
# Explanation: Same as Example 1, except with the 5 in the top left corner being modified to 8. Since there are two 8's in the top left 3x3 sub-box, it is invalid.
# Algorithm
# Check if the rows and columns contain values 1-9, without repetition.
# If any row or column violates this condition, the Sudoku board is invalid.
# Check to see if each of the 9 sub-squares contains values 1-9, without repetition. If they do, the Sudoku board is valid; otherwise, it is invalid.
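# Worked example of the sub-box anchor arithmetic used in isValid() below (for
# illustration only): the 3x3 box containing the cell at row=5, col=7 starts at
# (row - row % 3, col - col % 3) = (3, 6), i.e. the middle-right sub-box.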
# Checks whether there is any duplicate in current row or not
def NotInRow(arr, row):
st=set()
for i in range(0,9):
# If already encountered before,
# return false
if arr[row][i] in st:
return False
# If it is not an empty cell, insert value
# at the current cell in the set
if arr[row][i] != '.':
st.add(arr[row][i])
return True
# Checks whether there is any duplicate in current column or not
def NotInCol(arr, col):
st=set()
for i in range(0,9):
if arr[i][col] in st:
return False
if arr[i][col] !='.':
st.add(arr[i][col])
return True
# Checks whether there is any duplicate in current 3x3 box or not.
def NotInBox(arr, StartRow, StartCol):
st=set()
for row in range(0,3):
for col in range(0,3):
curr=arr[row + StartRow][col + StartCol]
if curr in st:
return False
if curr != '.':
st.add(curr)
return True
# Checks whether current row and current column and current 3x3 box is valid or not
def isValid(arr,row, col):
    return (NotInRow(arr, row) and NotInCol(arr, col) and NotInBox(arr, row - row % 3, col - col % 3))
def IsValidConfig(arr,n):
for i in range(0,n):
for j in range(0,n):
if not isValid(arr, i, j ):
return False
return True
if __name__ == "__main__":
#Valid
# board = [['5', '3', '.', '.', '7', '.', '.', '.', '.'],
# ['6', '.', '.', '1', '9', '5', '.', '.', '.'],
# ['.', '9', '8', '.', '.', '.', '.', '6', '.'],
# ['8', '.', '.', '.', '6', '.', '.', '.', '3'],
# ['4', '.', '.', '8', '.', '3', '.', '.', '1'],
# ['7', '.', '.', '.', '2', '.', '.', '.', '6'],
# ['.', '6', '.', '.', '.', '.', '2', '8', '.'],
# ['.', '.', '.', '4', '1', '9', '.', '.', '5'],
# ['.', '.', '.', '.', '8', '.', '.', '7', '9']]
#InValid
board = [['8', '3', '.', '.', '7', '.', '.', '.', '.'],
['6', '.', '.', '1', '9', '5', '.', '.', '.'],
['.', '9', '8', '.', '.', '.', '.', '6', '.'],
['8', '.', '.', '.', '6', '.', '.', '.', '3'],
['4', '.', '.', '8', '.', '3', '.', '.', '1'],
['7', '.', '.', '.', '2', '.', '.', '.', '6'],
['.', '6', '.', '.', '.', '.', '2', '8', '.'],
['.', '.', '.', '4', '1', '9', '.', '.', '5'],
['.', '.', '.', '.', '8', '.', '.', '7', '9']]
if IsValidConfig(board, 9 ):
print ("YES")
else:
print ("NO")
| [
"[email protected]"
] | |
fb2a76cd6a85c90f75d740c9a18b28efbc09de86 | e94b018362431ce8b22fe306aa0db23e82362b82 | /tests/common/test_util.py | ead5c31bf85ad9b6d671e2ac78f9c47528fa9607 | [
"MIT"
] | permissive | tybiot/SBMLLint | 71745fb44f2a6e1be83e0d6854aa7b1caa700a4d | f11124c4059f40496454ba1adc814f1bd33c783b | refs/heads/master | 2022-11-13T20:27:48.343268 | 2020-06-28T23:56:40 | 2020-06-28T23:56:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,188 | py | from SBMLLint.common import constants as cn
from SBMLLint.common import exceptions
from SBMLLint.common import util
import libsbml
import numpy as np
import os
import unittest
NUM_S1 = 2
NUM_S2 = 3
IGNORE_TEST = False
ANTIMONY_STG = '''
%dS1 -> %dS2; 1
S1 = 0
S2 = 0
''' % (NUM_S1, NUM_S2)
ZIP_PATH = os.path.join(cn.BIOMODELS_DIR, cn.BIOMODELS_ZIP_FILENAME)
#############################
# Tests
#############################
class TestFunctions(unittest.TestCase):
def testGetXMLString(self):
def test(xml):
reader = libsbml.SBMLReader()
document = reader.readSBMLFromString(xml)
util.checkSBMLDocument(document)
model = document.getModel()
self.assertTrue('Reaction' in str(type(model.getReaction(0))))
def getString(path):
with open(path, 'r') as fd:
lines = '\n'.join(fd.readlines())
return lines
#
for path in [cn.TEST_FILE2, cn.TEST_FILE3]:
try:
test(util.getXML(path))
test(util.getXML(getString(path)))
except exceptions.MissingTelluriumError:
pass
def testGetXMLFromAntimony(self):
try:
xml = util.getXMLFromAntimony(ANTIMONY_STG)
except exceptions.MissingTelluriumError:
return
self.assertTrue(isinstance(xml, str))
reader = libsbml.SBMLReader()
libsbml_document = reader.readSBMLFromString(xml)
util.checkSBMLDocument(libsbml_document)
model = libsbml_document.getModel()
self.assertTrue('Reaction' in
str(type(model.getReaction(0))))
def testIsInt(self):
self.assertTrue(util.isInt(1))
self.assertFalse(util.isInt(1.5))
self.assertFalse(util.isInt('ab'))
def testIsFloat(self):
self.assertTrue(util.isFloat(1))
self.assertTrue(util.isFloat(1.5))
self.assertTrue(util.isFloat('1.5'))
self.assertFalse(util.isFloat('ab'))
def testIsSBMLModel(self):
return
self.assertFalse(util.isSBMLModel("dummy"))
xml = util.getXML(cn.TEST_FILE2)
reader = libsbml.SBMLReader()
document = reader.readSBMLFromString(xml)
util.checkSBMLDocument(document)
model = document.getModel()
self.assertTrue(util.isSBMLModel(model))
def testUniqueify(self):
class Tester():
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
def isEqual(self, other):
return self.name == other.name
#
STRING = 'abc'
REPEATED_STRING = STRING + STRING
collection = [Tester(s) for s in REPEATED_STRING]
result = util.uniqueify(collection)
self.assertEqual(len(result), len(STRING))
def testGetNextFid(self):
fid = open(ZIP_PATH, "r")
count = 0
for zip_fid in util.getNextFid(fid, is_print=False):
lines = zip_fid.read()
count += 1
self.assertGreater(len(lines), 0)
self.assertGreater(count, 0)
def testRunFunction(self):
def testFunc(a, b=2):
if b == 0:
raise(ValueError)
return a/b
#
self.assertEqual(
util.runFunction(testFunc, [6], {'b': 3}), 2)
result = util.runFunction(testFunc, [6], {'b': 0})
self.assertIsNone(result)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
58467f4df4f61b2e8564e17b1028b7aef8aea879 | 2b5b082ca006eb8063a4a43f4998f4c0268a46e6 | /sessauth2/sessauth2/asgi.py | dcaac02c620f4158a3c708ba0533fcbea01eccc9 | [] | no_license | shobhit1215/Rest_Framework_tut | a52ae5b7a1f0213ace19d9b2b5d557b15d36c376 | 351da2564a55d1530f5517627cce73663252d07c | refs/heads/main | 2023-05-26T04:47:01.336843 | 2021-06-03T12:43:51 | 2021-06-03T12:43:51 | 373,503,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for sessauth2 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sessauth2.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
b6334ff9ea75bda1417ea705234c6515841b743d | cdfcac165a7c06a137cb3563dbe31d3044494a95 | /6_SGD/1_0_gradientTape.py | 402b93080c204c3e0fdb1f37475164524621c459 | [] | no_license | fzingithub/learningTensorflowProject | 141b3d980a7aa6f729cea18a72ae83d591812c83 | 5607be5f8daeb5591aba719e69b53b34b93d1e03 | refs/heads/master | 2020-05-19T19:05:17.495549 | 2019-09-25T06:48:04 | 2019-09-25T06:48:04 | 185,169,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | # -*- coding: utf-8 -*-
'''
Created on 2019/5/7
Author: zhe
Email: [email protected]
'''
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# w = tf.constant(1.)
# x = tf.constant(2.)
# y = x * w
# with tf.GradientTape() as tape:
# tape.watch([w])
# y2 = x * w
#
# grad1 = tape.gradient(y, [w])
# print(grad1)
#
# with tf.GradientTape() as tape:
# tape.watch([w])
# y2 = x * w
#
# grad2 = tape.gradient(y2, [w])
# print(grad2)
# persistent
w = tf.constant(1.)
x = tf.constant(2.)
y = x * w
with tf.GradientTape(persistent=True) as tape:
tape.watch([w])
y2 = x * w
grad = tape.gradient(y2, [w])
print(grad)
grad = tape.gradient(y2, [w])
print(grad) | [
"[email protected]"
] | |
873c85d7134a8df275df8a80775826b5150e310d | ec80586b3aa3e90178a59446b33948012121b56f | /relationship_app/admin.py | 022de5e18e33d4fb2cca13f2b8d97b6f3ba98602 | [] | no_license | amritghimire/se | 567f050969e0e2ad667684f1d6ca03a2f21071bf | 6c150cb75e61b43cc938216a4e5f661d5a97aa11 | refs/heads/master | 2021-03-19T17:24:20.234650 | 2019-09-03T05:55:04 | 2019-09-03T05:55:04 | 112,606,113 | 0 | 0 | null | 2019-09-03T05:56:46 | 2017-11-30T11:57:56 | JavaScript | UTF-8 | Python | false | false | 201 | py | from django.contrib import admin
from .models import Relationship,RelationshipWithQuestion
# Register your models here.
admin.site.register(Relationship)
admin.site.register(RelationshipWithQuestion)
| [
"[email protected]"
] | |
2acba97797af6087bb411fa464e5be1ea2a890ed | c83e356d265a1d294733885c373d0a4c258c2d5e | /mayan/apps/documents/migrations/0071_auto_20201128_0330.py | 6583b82c2de0181c330da3426f188f2204c35e62 | [
"Apache-2.0"
] | permissive | TrellixVulnTeam/fall-2021-hw2-451-unavailable-for-legal-reasons_6YX3 | 4160809d2c96707a196b8c94ea9e4df1a119d96a | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | refs/heads/master | 2023-08-21T23:36:41.230179 | 2021-10-02T03:51:12 | 2021-10-02T03:51:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('documents', '0070_auto_20201128_0249'),
]
operations = [
migrations.RenameField(
model_name='document',
old_name='date_added',
new_name='datetime_created',
),
]
| [
"[email protected]"
] | |
a436ead0e31e3f5f505f43aab6f77de6ca2edc9e | e71fa62123b2b8f7c1a22acb1babeb6631a4549b | /xlsxwriter/test/comparison/test_escapes02.py | 1a1af325a8fe56d1c47442561a85b514ba40e94d | [
"BSD-2-Clause"
] | permissive | timgates42/XlsxWriter | 40480b6b834f28c4a7b6fc490657e558b0a466e5 | 7ad2541c5f12b70be471b447ab709c451618ab59 | refs/heads/main | 2023-03-16T14:31:08.915121 | 2022-07-13T23:43:45 | 2022-07-13T23:43:45 | 242,121,381 | 0 | 0 | NOASSERTION | 2020-02-21T11:14:55 | 2020-02-21T11:14:55 | null | UTF-8 | Python | false | false | 943 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('escapes02.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with comments.Check encoding of comments."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write('A1', '"<>\'&')
worksheet.write_comment('B2', """<>&"'""")
worksheet.set_comments_author("""I am '"<>&""")
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
a6ffc620d1d3aee1f0cdf209cf463c92bf609284 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2018_07_01/models/express_route_circuits_routes_table_list_result.py | 692bb9903130c0dd3e820e34bf91cfa419fcb98f | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,282 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitsRoutesTableListResult(Model):
"""Response for ListRoutesTable associated with the Express Route Circuits
API.
:param value: The list of routes table.
:type value:
list[~azure.mgmt.network.v2018_07_01.models.ExpressRouteCircuitRoutesTable]
:param next_link: The URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTable]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ExpressRouteCircuitsRoutesTableListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
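
# Editor's sketch (hypothetical usage, not part of the generated SDK code):
# the model is a plain kwargs-based container, so it can be constructed
# directly, for example when composing a mocked list-routes-table response.
def _example_routes_table_result():
    return ExpressRouteCircuitsRoutesTableListResult(
        value=[],        # would normally hold ExpressRouteCircuitRoutesTable items
        next_link=None,  # URL of the next page of results, if any
    )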
| [
"[email protected]"
] | |
9e51529629c36bcf385786cb805d47763c6f5ab2 | 0c325cf7a68ef51067ed8db566d525a20de5b635 | /other/panda365/panda365/pd/api/fields.py | 42906c17d0e2cd22fb5c65f2f94fbe5c1743ff4f | [] | no_license | alinzel/NOTES | 2ab6aa1ef1d601a9ae8c0d23c0df2bca7e1aa241 | 3e0594641a605580e920d0b08a251fbc99f34e2f | refs/heads/master | 2023-01-08T22:48:30.762625 | 2020-01-17T09:14:47 | 2020-01-17T09:14:47 | 175,339,492 | 0 | 0 | null | 2022-12-27T15:01:19 | 2019-03-13T03:28:08 | HTML | UTF-8 | Python | false | false | 6,120 | py | from base64 import b64decode
from io import BytesIO
from marshmallow import fields, validate, ValidationError
from sqlalchemy import inspect
from werkzeug.datastructures import FileStorage
import binascii
import uuid
class DataURL(fields.String):
"""
data url as defined in RFC 2397:
data:[mimetype][;base64],[data]
Usually used only for parsing incoming data.
"""
default_error_messages = {
'malformed': 'cannot be parsed as a data url.',
'padding': 'payload is incorrectly padded',
'mimetype': 'mimetype not allowed',
}
def __init__(self, *args, allowed_mimetypes=None, **kwargs):
if kwargs.get('load_only') is False:
raise ValueError('this field can only be used to load data; '
'however load_only is set to False')
kwargs['load_only'] = True
kwargs.setdefault('description',
'RFC 2397 data url. '
'Format: `data:[mimetype][;base64],[data]`')
super().__init__(*args, **kwargs)
if allowed_mimetypes:
self._allowed_mimetypes = set(allowed_mimetypes)
else:
self._allowed_mimetypes = None
def validate_mimetype(self, mimetype):
if self._allowed_mimetypes and mimetype not in self._allowed_mimetypes:
self.fail('mimetype')
def _deserialize(self, value, attr, obj):
value = super()._deserialize(value, attr, obj)
if not value.startswith('data:'):
self.fail('malformed')
try:
comma_index = value.index(',')
except ValueError:
self.fail('malformed')
# 5 is for "data:"
mimetype, _ = value[5:comma_index].split(';')
if not mimetype:
self.fail('malformed')
self.validate_mimetype(mimetype)
# construct stream from data
try:
# +1 to skip the comma
data = b64decode(value[comma_index + 1:])
except binascii.Error:
self.fail('padding')
name = '{}.{}'.format(uuid.uuid4().hex, mimetype.split('/')[-1])
return FileStorage(
stream=BytesIO(data),
content_type=mimetype,
filename=name,
name=name,
)
class Currency(fields.String):
def _deserialize(self, value, attr, obj):
raise NotImplementedError() # pragma: no cover
def _serialize(self, value, attr, obj):
if value: # pragma: no cover
return {
'code': value.code,
'symbol': value.symbol
}
class Enum(fields.String):
def __init__(self, enum, choices=None, *args, **kwargs):
"""
:param enum: enum used to validate incoming value
:param list choices:
by default all items of the enum are used. If only a subset of the
enum should be used, pass them in here.
Example::
class Status(Enum):
ok = 1
fail = 2
my_dirty_internal_enum_which_should_not_be_told = 3
class FooSchema(Schema):
status = Enum(
enum=Status, choices=[Status.ok, Status.fail])
"""
self._enum = enum
validators = kwargs.setdefault('validate', [])
validators.append(validate.OneOf(choices=choices or enum))
self.default_error_messages.update(
dict(bad_enum='{value} is not a valid choice'))
super().__init__(*args, **kwargs)
def _serialize(self, value, attr, obj):
if value:
return getattr(value, 'name')
def _deserialize(self, value, attr, obj):
value = super()._deserialize(value, attr, obj)
try:
return getattr(self._enum, value)
except AttributeError:
self.fail('bad_enum', value=repr(value))
class ProductInfo(fields.String):
def __init__(self, **kwargs):
kwargs.setdefault(
'description', '''
a list of objects, each has the key `name` and
`value`. Example:
[
{
"name": "Brand",
"value": "Apple"
}, {
"name": "Country",
"value": "China"
}
]
'''
)
super().__init__(**kwargs)
def _serialize(self, value, attr, obj):
if not value:
return
ret = []
for line in value.split('\n'):
k, v = line.split(':')
ret.append(dict(name=k, value=v))
return ret
def _deserialize(self, value, attr, obj):
raise NotImplementedError() # pragma: no cover
class ModelPKField(fields.Integer):
"""A field representing a model instance.
This serializes the value to the id of the model, and deserialize from
a given id to a model instance
:param model_class: a db Model
:param filters: filters to apply when getting the record from id
"""
default_error_messages = {
'notfound': 'record cannot be found',
}
def __init__(self, model_class, *filters, **kwargs):
pks = inspect(model_class).primary_key
if len(pks) > 1: # pragma: no cover
raise ValueError('only support models with 1 primary key')
self.model = model_class
self.filters = filters
self.pk_name = pks[0].name
super().__init__(**kwargs)
# def _serialize(self, value, attr, obj):
# if isinstance(value, self.model):
# return getattr(value, self.pk_name)
def _deserialize(self, value, attr, obj):
value = super()._deserialize(value, attr, obj)
filters = []
for f in self.filters:
if callable(f):
f = f()
filters.append(f)
obj = self.model.query.filter(
getattr(self.model, self.pk_name) == value,
*filters
).first()
if not obj:
raise ValidationError('{} {} cannot be found'.format(
self.model.__name__, value,
))
return obj
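
# Editor's sketch (hypothetical, not part of this module): minimal use of the
# Enum field above inside a marshmallow Schema, mirroring its docstring. The
# _Status enum and _ExampleSchema names are illustrative only.
import enum as _enum
from marshmallow import Schema as _Schema


class _Status(_enum.Enum):
    ok = 1
    fail = 2


class _ExampleSchema(_Schema):
    status = Enum(enum=_Status, choices=[_Status.ok, _Status.fail])

# Dumping an object whose status is _Status.ok serializes the field to its
# name ('ok'); loading 'ok' deserializes it back to _Status.ok.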
| [
"[email protected]"
] | |
3f197feb9ea3a2c2da2ddc7cde1f71136c78662a | 35baf7fe5bb66f2402de400383b8aa426c097bfb | /co2_diag/recipes/seasonal_cycles.py | 95de0cc00e4835814255f8121b1a1b9cd8473448 | [
"BSD-3-Clause"
] | permissive | BunnyVon/gdess | 24f0e7e1e6d2a00dbbcd9e3fa72e65d983b6567a | 09b83b3d7ade133b6d993e010912bee86c24c934 | refs/heads/main | 2023-07-18T06:11:09.937268 | 2021-09-05T19:09:10 | 2021-09-05T19:09:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,919 | py | """ This produces plots of seasonal cycles of atmospheric CO2
This function parses:
- observational data from Globalview+ surface stations
- model output from CMIP6
================================================================================
"""
from co2_diag import set_verbose, benchmark_recipe
from co2_diag.recipe_parsers import parse_recipe_options, add_seasonal_cycle_args_to_parser
from co2_diag.recipes.recipe_utils import populate_station_list
from co2_diag.graphics.comparison_plots import plot_comparison_against_model, plot_lines_for_all_station_cycles
from co2_diag.operations.Confrontation import Confrontation, load_cmip_model_output
from co2_diag.formatters import numstr, append_before_extension
from dask.diagnostics import ProgressBar
from typing import Union
import argparse, logging
_logger = logging.getLogger(__name__)
@benchmark_recipe
def seasonal_cycles(options: Union[dict, argparse.Namespace],
verbose: Union[bool, str] = False,
) -> tuple:
"""Execute a series of preprocessing steps and generate a diagnostic result.
Relevant co2_diag collections are instantiated and processed.
    If one station is specified, then that station will be compared against model data at the same location.
If more than one station is specified, then no model data will be compared against it.
Parameters
----------
options : Union[dict, argparse.Namespace]
Recipe options specified as key:value pairs. It can contain the following keys:
ref_data : str
(required) directory containing the NOAA Obspack NetCDF files
model_name : str, default 'CMIP.NOAA-GFDL.GFDL-ESM4.esm-hist.Amon.gr1'
cmip_load_method : str, default 'pangeo'
either 'pangeo' (which uses a stored url),
or 'local' (which uses the path defined in config file)
start_yr : str, default '1960'
end_yr : str, default '2015'
latitude_bin_size : numeric, default None
figure_savepath : str, default None
difference : str, default None
globalmean : str
either 'station', which requires specifying the <station_code> parameter,
or 'global', which will calculate a global mean
station_list : str, default 'mlo'
a sequence of three letter codes (space-delimited) to specify
the desired surface observing station
verbose : Union[bool, str]
can be either True, False, or a string for level such as "INFO, DEBUG, etc."
Returns
-------
A tuple:
A DataFrame containing the data that were plotted.
A list of the data for each station
A DataFrame containing the metadata for each station
(and if a comparison with a model was made, then the datetimes and values are also part of the returned tuple)
"""
set_verbose(_logger, verbose)
if verbose:
ProgressBar().register()
_logger.debug("Parsing diagnostic parameters...")
opts = parse_recipe_options(options, add_seasonal_cycle_args_to_parser)
stations_to_analyze = populate_station_list(opts.run_all_stations, opts.station_list)
# --- Load CMIP model output ---
compare_against_model, ds_mdl = load_cmip_model_output(opts.model_name, opts.cmip_load_method, verbose=verbose)
conf = Confrontation(compare_against_model, ds_mdl, opts, stations_to_analyze, verbose)
cycles_of_each_station, concatenated_dfs, df_station_metadata, \
xdata_obs, xdata_mdl, ydata_obs, ydata_mdl, \
rmse_y_true, rmse_y_pred = conf.looper(how='seasonal')
# --- Plot the seasonal cycles at all station locations
plot_lines_for_all_station_cycles(xdata_obs, ydata_obs.iloc[:, ::-1], figure_title="GV+",
savepath=append_before_extension(opts.figure_savepath, 'obs_lineplot'))
if ydata_mdl is not None:
# (ii) CMIP data
plot_lines_for_all_station_cycles(xdata_obs, ydata_mdl.iloc[:, ::-1], figure_title="CMIP",
savepath=append_before_extension(opts.figure_savepath, 'mdl_lineplot'))
# (iii) Model - obs difference
ydiff = ydata_mdl - ydata_obs
plot_lines_for_all_station_cycles(xdata_obs, ydiff.iloc[:, ::-1], figure_title="Difference",
savepath=append_before_extension(opts.figure_savepath, 'diff_lineplot'))
# (iv) Model and obs difference
plot_comparison_against_model(xdata_obs, ydata_obs, f'obs',
xdata_obs, ydata_mdl, f'model',
savepath=append_before_extension(opts.figure_savepath, 'overlapped'))
_logger.info("Saved at <%s>" % opts.figure_savepath)
return concatenated_dfs, cycles_of_each_station, df_station_metadata
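
# Editor's sketch (hypothetical invocation, not part of the recipe module):
# the recipe accepts its options as a plain dict keyed as described in the
# docstring above; the paths below are placeholders, not real locations.
def _example_run():
    example_options = {
        'ref_data': '/path/to/obspack_surface_netcdf/',             # placeholder
        'model_name': 'CMIP.NOAA-GFDL.GFDL-ESM4.esm-hist.Amon.gr1',
        'start_yr': '1980',
        'end_yr': '2010',
        'station_list': 'mlo',
        'figure_savepath': '/tmp/seasonal_cycles',                  # placeholder
    }
    return seasonal_cycles(example_options, verbose='INFO')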
| [
"[email protected]"
] | |
f78f19d145b126047de673d89f70e08fdc0684a7 | 3cf0d750948a758d5771dd778fbb783d64a044ae | /src/pads/tests/test_lca.py | b11cbc743ce87210e3a1d60e058e855380055c1b | [
"CC-BY-NC-SA-4.0",
"Apache-2.0",
"MIT"
] | permissive | hbulpf/pydemo | 6552a08b3c85721ac1b2ba335b030e234ad03b6c | ea3e9f9086116a86ecef803e9e3179a34c94c20f | refs/heads/master | 2022-11-30T21:06:29.933820 | 2022-01-15T17:05:16 | 2022-01-15T17:05:16 | 237,584,300 | 6 | 1 | Apache-2.0 | 2022-11-22T09:49:38 | 2020-02-01T08:20:43 | Python | UTF-8 | Python | false | false | 1,413 | py | import random
import unittest
from pads.lca import RangeMin
from pads.lca import LogarithmicRangeMin
from pads.lca import LCA
from pads.lca import OfflineLCA
class RandomRangeMinTest(unittest.TestCase):
def testRangeMin(self):
for trial in range(20):
data = [random.choice(range(1000000))
for i in range(random.randint(1,100))]
R = RangeMin(data)
for sample in range(100):
i = random.randint(0,len(data)-1)
j = random.randint(i+1,len(data))
self.assertEqual(R[i:j],min(data[i:j]))
class LCATest(unittest.TestCase):
parent = {'b':'a','c':'a','d':'a','e':'b','f':'b','g':'f','h':'g','i':'g'}
lcas = {
('a','b'):'a',
('b','c'):'a',
('c','d'):'a',
('d','e'):'a',
('e','f'):'b',
('e','g'):'b',
('e','h'):'b',
('c','i'):'a',
('a','i'):'a',
('f','i'):'f',
}
def testLCA(self):
L = LCA(self.parent)
for k,v in self.lcas.items():
self.assertEqual(L(*k),v)
def testLogLCA(self):
L = LCA(self.parent, LogarithmicRangeMin)
for k,v in self.lcas.items():
self.assertEqual(L(*k),v)
def testOfflineLCA(self):
L = OfflineLCA(self.parent, self.lcas.keys())
for (p,q),v in self.lcas.items():
self.assertEqual(L[p][q],v)
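
# Editor's sketch (not part of the test suite): direct use of the structures
# exercised above, outside the unittest harness.
def _example_usage():
    data = [5, 2, 8, 1, 9]
    rmin = RangeMin(data)
    assert rmin[1:4] == min(data[1:4])  # range-minimum query over data[1:4] == 1

    parent = {'b': 'a', 'c': 'a', 'd': 'b'}
    lca = LCA(parent)
    assert lca('c', 'd') == 'a'  # lowest common ancestor of 'c' and 'd'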
| [
"[email protected]"
] | |
c9b58a0e23df735180efc08cacda6fe5dd2b365f | 612e9449ddbe95f1b4a0dd21e13e46661e39c872 | /lib/formats/json.py | 3c6e746b04f54281505f5b27327aa67f161ddd3f | [] | no_license | racposner/label_reconciliations | 0ad22c8250a5d6662e9aeebeb97741146ac8fdac | 4c916994e7f193e6ed0b1c6b18f247239f1d847a | refs/heads/master | 2022-11-27T02:00:51.108238 | 2020-07-28T18:47:53 | 2020-07-28T18:47:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | """Import a flat JSON file as unreconciled data."""
import pandas as pd
import lib.util as util
def read(args):
"""Read a JSON file into a data-frame."""
unreconciled = pd.read_json(args.input_file)
unreconciled = util.unreconciled_setup(args, unreconciled)
return unreconciled, {}
| [
"[email protected]"
] | |
8c0e06b23fc473400d9b904b94698f979e0ff6ef | 9b34e542589b7d0d327d3255ac4fcd0bcf5e7216 | /first one from right to left in binary.py | e09a044e3aaf81efa19205fa549a8331750abc86 | [] | no_license | Sravaniram/pythonprogramming | 9ee23cd2ff925fa2c6af320d59643747db173cd7 | 4c09c6787a39b18a12dfcbb2c33fcceabd4fc621 | refs/heads/master | 2020-03-26T23:26:03.391360 | 2019-04-23T12:49:53 | 2019-04-23T12:49:53 | 145,541,824 | 1 | 9 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | n,m=map(int,input().split())
# Print the 1-based position, counted from the right, of the first '1' bit
# in the binary representation of n * m (i.e. the lowest set bit).
k = bin(n * m)
c = 0
for x in range(len(k) - 1, 1, -1):  # scan right-to-left, stopping before the '0b' prefix
    c = c + 1
    if k[x] == '1':
        print(c)
        break
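# Editor's note (alternative sketch, not in the original file): the same value
# can be computed without scanning the binary string, using the lowest-set-bit
# trick -- for any positive integer v, (v & -v) isolates its lowest set bit:
#
#   v = n * m
#   print((v & -v).bit_length())  # 1-based position of the first '1' from the right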
| [
"[email protected]"
] | |
d4b58b2e033e8c716f005be46976aa0c5a9599e7 | 347c70d4851b568e03e83387f77ae81071ab739e | /fn_proofpoint_tap/fn_proofpoint_tap/util/selftest.py | f923ce1c4bd5ea486dbcb877544c0adc313565cb | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | neetinkandhare/resilient-community-apps | 59d276b5fb7a92872143ce2b94edd680738693ce | 3ecdabe6bf2fc08f0f8e58cbe92553270d8da42f | refs/heads/master | 2021-12-27T09:05:36.563404 | 2021-09-29T13:04:56 | 2021-09-29T13:04:56 | 159,804,866 | 1 | 0 | MIT | 2021-08-03T19:45:45 | 2018-11-30T10:07:32 | Python | UTF-8 | Python | false | false | 1,750 | py | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2019. All Rights Reserved.
"""Function implementation
test with: resilient-circuits selftest -l fn_proofpoint_campaign
"""
import logging
import os
from requests.auth import HTTPBasicAuth
from resilient_lib import RequestsCommon, validate_fields
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler())
def selftest_function(opts):
"""
Placeholder for selftest function. An example use would be to test package api connectivity.
    Suggested return values are unimplemented, success, or failure.
"""
options = opts.get('fn_proofpoint_tap', {})
validate_fields(['base_url', 'username', 'password'], options)
base_url = options.get('base_url')
username = options.get('username')
password = options.get('password')
cafile = options.get('cafile')
bundle = os.path.expanduser(cafile) if cafile else False
basic_auth = HTTPBasicAuth(username, password)
url = '{}/siem/all?format=JSON&sinceSeconds={}'.format(base_url, 300) # /v2/siem/all Fetch events for all clicks and messages relating to known threats within the specified time period
rc = RequestsCommon(opts=opts, function_opts=options)
try:
res = rc.execute_call_v2('get', url, auth=basic_auth, verify=bundle, proxies=rc.get_proxies())
if res.status_code == 200:
return {'state': 'success'}
return {
'state': 'failure',
'reason': 'status code {0}'.format(res.status_code)
}
except Exception as ex:
log.error(ex)
return {
'state': 'failure',
'reason': ex
}
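
# Editor's sketch (hypothetical, not part of the integration package): the
# selftest expects the full app configuration dict, with the Proofpoint TAP
# credentials under the 'fn_proofpoint_tap' section. All values below are
# placeholders.
def _example_selftest_call():
    opts = {
        'fn_proofpoint_tap': {
            'base_url': 'https://tap-api-v2.proofpoint.com/v2',  # assumed endpoint form
            'username': 'tap-service-principal',                 # placeholder
            'password': 'tap-secret',                            # placeholder
        }
    }
    return selftest_function(opts)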
| [
"[email protected]"
] | |
85e1dfdd04e38ad955261cc8e671a25fb7798885 | 26dec2f8f87a187119336b09d90182d532e9add8 | /mcod/histories/apps.py | a7f0a00b9687b8ac862b1830d5b72c8fe03043fe | [] | no_license | olekstomek/mcod-backend-dane.gov.pl | 7008bcd2dbd0dbada7fe535536b02cf27f3fe4fd | 090dbf82c57633de9d53530f0c93dddf6b43a23b | refs/heads/source-with-hitory-from-gitlab | 2022-09-14T08:09:45.213971 | 2019-05-31T06:22:11 | 2019-05-31T06:22:11 | 242,246,709 | 0 | 1 | null | 2020-02-24T22:39:26 | 2020-02-21T23:11:50 | Python | UTF-8 | Python | false | false | 187 | py | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class HistoriesConfig(AppConfig):
name = 'mcod.histories'
verbose_name = _('Histories')
| [
"[email protected]"
] | |
0bba8246a143872757d6de146020f6d5366ab9fb | 6dcf2d8ce367d6afd64024e5f41d4a11c27ca3d5 | /gmecol/migrations/0002_auto__add_field_game_image_url__add_field_game_remote_id__add_field_pl.py | 293eada3da4be3326dd035ea611a874e7f9c658c | [
"BSD-2-Clause"
] | permissive | iyox/gmecol | 75cc02870958fb0c747f93f62c42868eaf11601b | c03ff0fdfca7cb73fe8646e1ed4543db7d2e6c89 | refs/heads/master | 2021-01-15T16:58:27.692794 | 2012-09-06T03:50:39 | 2012-09-06T03:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,919 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Game.image_url'
db.add_column('gmecol_game', 'image_url',
self.gf('django.db.models.fields.TextField')(default=1),
keep_default=False)
# Adding field 'Game.remote_id'
db.add_column('gmecol_game', 'remote_id',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
# Adding field 'Platform.image_url'
db.add_column('gmecol_platform', 'image_url',
self.gf('django.db.models.fields.TextField')(default=1),
keep_default=False)
# Adding field 'Platform.remote_id'
db.add_column('gmecol_platform', 'remote_id',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Game.image_url'
db.delete_column('gmecol_game', 'image_url')
# Deleting field 'Game.remote_id'
db.delete_column('gmecol_game', 'remote_id')
# Deleting field 'Platform.image_url'
db.delete_column('gmecol_platform', 'image_url')
# Deleting field 'Platform.remote_id'
db.delete_column('gmecol_platform', 'remote_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gmecol.game': {
'Meta': {'object_name': 'Game'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.TextField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'platform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gmecol.Platform']"}),
'remote_id': ('django.db.models.fields.IntegerField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'gmecol.platform': {
'Meta': {'object_name': 'Platform'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.TextField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'remote_id': ('django.db.models.fields.IntegerField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'gmecol.usergame': {
'Meta': {'object_name': 'UserGame'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'for_sale': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'for_trade': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'game': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gmecol.Game']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gmecol.UserProfile']"})
},
'gmecol.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'games': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['gmecol.Game']", 'through': "orm['gmecol.UserGame']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'platforms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['gmecol.Platform']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['gmecol']
| [
"[email protected]"
] | |
9c37792e8f94df710b6d3440b26852c0adaa94e4 | 79baf4404e51bdc0f33038b3b16bea86ff09e82f | /azext_iot/tests/digitaltwins/test_dt_generic_unit.py | 0893dc929dab4b65fd9bf2a07dadc07136a01af7 | [
"MIT"
] | permissive | Azure/azure-iot-cli-extension | 80b6cb29e907f7512c7361a85d6bfdea5ae2dd9e | bdbe65c3874ff632c2eba25c762e9ea8e9175b5f | refs/heads/dev | 2023-09-04T10:57:16.118442 | 2023-08-28T17:12:05 | 2023-08-28T17:12:05 | 103,456,760 | 95 | 80 | NOASSERTION | 2023-09-13T00:02:54 | 2017-09-13T22:04:36 | Python | UTF-8 | Python | false | false | 1,280 | py | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import pytest
from azext_iot.digitaltwins.providers import generic as subject
class TestLROCheckStateHelper(object):
@pytest.mark.parametrize(
"test_input", [
{},
{"foo": "bar"},
{"provisioning_state": "bar"},
{"properties": {"foo": "bar"}},
{"properties": {"provisioning_state": "foo"}},
{"provisioning_state": "bar", "properties": {"provisioning_state": "foo"}}
]
)
def test_get_provisioning_state(self, test_input):
output = subject._get_provisioning_state(test_input)
if test_input.get("provisioning_state"):
assert output == test_input["provisioning_state"]
elif test_input.get("properties") and test_input.get("properties").get("provisioning_state"):
assert output == test_input["properties"]["provisioning_state"]
else:
assert output is None
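
# Editor's note (illustrative only, not the actual azext_iot implementation):
# the behaviour asserted above is consistent with a helper of roughly this
# shape, preferring a top-level provisioning_state and falling back to the
# nested properties dict:
#
#   def _get_provisioning_state(resource):
#       state = resource.get("provisioning_state")
#       if state:
#           return state
#       return (resource.get("properties") or {}).get("provisioning_state")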
| [
"[email protected]"
] | |
0a03e97996b1fa69d230b0deb8bc34c21fa3c4c6 | 61f38a2e01908bd5cf2351071ad846706a642bde | /tensorflow/python/training/warm_starting_util_test.py | 3eddf79e341b9b663af6ca5646897ef886a05be8 | [
"Apache-2.0"
] | permissive | agataszcz/tensorflow | f22b2db4504d9094b4a8b00c72576601ae3e53c5 | 05973093a4716f861db2490dab2bcb8b9a6ee557 | refs/heads/master | 2020-03-30T14:49:48.233844 | 2018-10-02T23:36:45 | 2018-10-02T23:36:45 | 151,337,678 | 2 | 0 | Apache-2.0 | 2018-10-02T23:37:15 | 2018-10-02T23:37:23 | null | UTF-8 | Python | false | false | 52,883 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for warm_starting_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import six
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import warm_starting_util as ws_util
ones = init_ops.ones_initializer
norms = init_ops.truncated_normal_initializer
rand = init_ops.random_uniform_initializer
zeros = init_ops.zeros_initializer
class WarmStartingUtilTest(test.TestCase):
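  # Editor's note: the tests below share one pattern -- create variables in an
  # initial graph/session, persist them with _write_checkpoint, then rebuild
  # (differently initialized and possibly partitioned) variables in a fresh
  # graph, call ws_util.warm_start or the _warm_start_var* helpers, run the
  # global variables initializer, and assert that the new variables picked up
  # the checkpointed (and, where applicable, vocab-remapped) values.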
def _write_vocab(self, string_values, file_name):
vocab_file = os.path.join(self.get_temp_dir(), file_name)
with open(vocab_file, "w") as f:
f.write("\n".join(string_values))
return vocab_file
def _write_checkpoint(self, sess):
sess.run(variables.global_variables_initializer())
saver = saver_lib.Saver()
ckpt_prefix = os.path.join(self.get_temp_dir(), "model")
saver.save(sess, ckpt_prefix, global_step=0)
def _create_prev_run_var(self,
var_name,
shape=None,
initializer=None,
partitioner=None):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
var = variable_scope.get_variable(
var_name,
shape=shape,
initializer=initializer,
partitioner=partitioner)
self._write_checkpoint(sess)
if partitioner:
self.assertTrue(isinstance(var, variables.PartitionedVariable))
var = var._get_variable_list()
return var, sess.run(var)
def _create_dummy_inputs(self):
return {
"sc_int": array_ops.sparse_placeholder(dtypes.int32),
"sc_hash": array_ops.sparse_placeholder(dtypes.string),
"sc_keys": array_ops.sparse_placeholder(dtypes.string),
"sc_vocab": array_ops.sparse_placeholder(dtypes.string),
"real": array_ops.placeholder(dtypes.float32)
}
def _create_linear_model(self, feature_cols, partitioner):
cols_to_vars = {}
with variable_scope.variable_scope("", partitioner=partitioner):
# Create the variables.
fc.linear_model(
features=self._create_dummy_inputs(),
feature_columns=feature_cols,
units=1,
cols_to_vars=cols_to_vars)
# Return a dictionary mapping each column to its variable.
return cols_to_vars
def _assert_cols_to_vars(self, cols_to_vars, cols_to_expected_values, sess):
for col, expected_values in six.iteritems(cols_to_expected_values):
for i, var in enumerate(cols_to_vars[col]):
self.assertAllClose(expected_values[i], var.eval(sess))
def testWarmStartVar(self):
_, prev_val = self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
ws_util._warm_start_var(fruit_weights, self.get_temp_dir())
sess.run(variables.global_variables_initializer())
self.assertAllClose(prev_val, fruit_weights.eval(sess))
def testWarmStartVarPrevVarPartitioned(self):
_, weights = self._create_prev_run_var(
"fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
prev_val = np.concatenate([weights[0], weights[1]], axis=0)
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
ws_util._warm_start_var(fruit_weights, self.get_temp_dir())
sess.run(variables.global_variables_initializer())
self.assertAllClose(prev_val, fruit_weights.eval(sess))
def testWarmStartVarCurrentVarPartitioned(self):
_, prev_val = self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights",
shape=[4, 1],
initializer=[[0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
ws_util._warm_start_var(fruit_weights, self.get_temp_dir())
sess.run(variables.global_variables_initializer())
fruit_weights = fruit_weights._get_variable_list()
new_val = np.concatenate(
[fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0)
self.assertAllClose(prev_val, new_val)
def testWarmStartVarBothVarsPartitioned(self):
_, weights = self._create_prev_run_var(
"old_scope/fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
prev_val = np.concatenate([weights[0], weights[1]], axis=0)
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"new_scope/fruit_weights",
shape=[4, 1],
initializer=[[0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
ws_util._warm_start_var(
fruit_weights,
self.get_temp_dir(),
prev_tensor_name="old_scope/fruit_weights")
sess.run(variables.global_variables_initializer())
fruit_weights = fruit_weights._get_variable_list()
new_val = np.concatenate(
[fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0)
self.assertAllClose(prev_val, new_val)
def testWarmStartVarWithVocab(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 5,
self.get_temp_dir(), prev_vocab_path)
sess.run(variables.global_variables_initializer())
self.assertAllClose([[2.], [1.5], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithColumnVocab(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
sess.run(variables.global_variables_initializer())
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.], [1.2, 1.5, 0.],
[2.3, 2., 0.]], fruit_output_layer.eval(sess))
def testWarmStartVarWithVocabConstrainedOldVocabSize(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
ws_util._warm_start_var_with_vocab(
fruit_weights,
new_vocab_path,
5,
self.get_temp_dir(),
prev_vocab_path,
previous_vocab_size=2)
sess.run(variables.global_variables_initializer())
# Old vocabulary limited to ['apple', 'banana'].
self.assertAllClose([[0.], [0.], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithVocabPrevVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 5,
self.get_temp_dir(), prev_vocab_path)
sess.run(variables.global_variables_initializer())
self.assertAllClose([[2.], [1.5], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithColumnVocabPrevVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
shape=[4, 2],
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
sess.run(variables.global_variables_initializer())
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.], [1.2, 1.5, 0.],
[2.3, 2., 0.]], fruit_output_layer.eval(sess))
def testWarmStartVarWithVocabCurrentVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights",
shape=[6, 1],
initializer=[[0.], [0.], [0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(
fruit_weights,
new_vocab_path,
5,
self.get_temp_dir(),
prev_vocab_path,
current_oov_buckets=1)
sess.run(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
fruit_weights_vars = fruit_weights._get_variable_list()
self.assertAllClose([[2.], [1.5], [1.]],
fruit_weights_vars[0].eval(sess))
self.assertAllClose([[0.5], [0.], [0.]],
fruit_weights_vars[1].eval(sess))
def testWarmStartVarWithColumnVocabCurrentVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
shape=[4, 3],
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
sess.run(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_output_layer, variables.PartitionedVariable))
fruit_output_layer_vars = fruit_output_layer._get_variable_list()
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.]],
fruit_output_layer_vars[0].eval(sess))
self.assertAllClose([[1.2, 1.5, 0.], [2.3, 2., 0.]],
fruit_output_layer_vars[1].eval(sess))
def testWarmStartVarWithVocabBothVarsPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and two new elements.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights",
shape=[6, 1],
initializer=[[0.], [0.], [0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 6,
self.get_temp_dir(), prev_vocab_path)
sess.run(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
fruit_weights_vars = fruit_weights._get_variable_list()
self.assertAllClose([[2.], [1.5], [1.]],
fruit_weights_vars[0].eval(sess))
self.assertAllClose([[0.5], [0.], [0.]],
fruit_weights_vars[1].eval(sess))
def testWarmStartVarWithColumnVocabBothVarsPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
shape=[4, 2],
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
shape=[4, 3],
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
sess.run(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_output_layer, variables.PartitionedVariable))
fruit_output_layer_vars = fruit_output_layer._get_variable_list()
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.]],
fruit_output_layer_vars[0].eval(sess))
self.assertAllClose([[1.2, 1.5, 0.], [2.3, 2., 0.]],
fruit_output_layer_vars[1].eval(sess))
def testWarmStart_ListOfVariables(self):
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var("v1", shape=[10, 1],
initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
# Initialize with zeros.
var = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=[var])
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started (init overridden to ones).
self.assertAllEqual(var.eval(), prev_int_val)
def testWarmStart_ListOfStrings(self):
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var("v1", shape=[10, 1],
initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
# Initialize with zeros.
var = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=["v1"])
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started (init overridden to ones).
self.assertAllEqual(var.eval(), prev_int_val)
def testWarmStart_SparseColumnIntegerized(self):
# Create feature column.
sc_int = fc.categorical_column_with_identity("sc_int", num_buckets=10)
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var(
"linear_model/sc_int/weights", shape=[10, 1], initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_int], partitioner)
sess.run(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_int: [np.zeros([10, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_int], partitioner)
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=".*sc_int.*")
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_int: [prev_int_val]}, sess)
def testWarmStart_SparseColumnHashed(self):
# Create feature column.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
# Save checkpoint from which to warm-start.
_, prev_hash_val = self._create_prev_run_var(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_hash], partitioner)
sess.run(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_hash: [np.zeros([15, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_hash], partitioner)
ws_util.warm_start(
self.get_temp_dir(), vars_to_warm_start=".*sc_hash.*")
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_hash: [prev_hash_val]},
sess)
def testWarmStart_SparseColumnVocabulary(self):
# Create vocab for sparse column "sc_vocab".
vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"vocab")
# Create feature column.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
# Save checkpoint from which to warm-start.
_, prev_vocab_val = self._create_prev_run_var(
"linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
sess.run(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([4, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
# Since old vocab is not explicitly set in WarmStartSettings, the old
# vocab is assumed to be same as new vocab.
ws_util.warm_start(
self.get_temp_dir(), vars_to_warm_start=".*sc_vocab.*")
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [prev_vocab_val]},
sess)
def testWarmStart_ExplicitCheckpointFile(self):
# Create vocab for sparse column "sc_vocab".
vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"vocab")
# Create feature column.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
# Save checkpoint from which to warm-start.
_, prev_vocab_val = self._create_prev_run_var(
"linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
sess.run(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([4, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
# Since old vocab is not explicitly set in WarmStartSettings, the old
# vocab is assumed to be same as new vocab.
ws_util.warm_start(
# Explicitly provide the file prefix instead of just the dir.
os.path.join(self.get_temp_dir(), "model-0"),
vars_to_warm_start=".*sc_vocab.*")
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [prev_vocab_val]},
sess)
def testWarmStart_SparseColumnVocabularyConstrainedVocabSizes(self):
# Create old vocabulary, and use a size smaller than the total number of
# entries.
old_vocab_path = self._write_vocab(["apple", "guava", "banana"],
"old_vocab")
old_vocab_size = 2 # ['apple', 'guava']
# Create new vocab for sparse column "sc_vocab".
current_vocab_path = self._write_vocab(
["apple", "banana", "guava", "orange"], "current_vocab")
# Create feature column. Only use 2 of the actual entries, resulting in
# ['apple', 'banana'] for the new vocabulary.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=current_vocab_path, vocabulary_size=2)
# Save checkpoint from which to warm-start.
self._create_prev_run_var(
"linear_model/sc_vocab/weights", shape=[2, 1], initializer=ones())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
sess.run(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([2, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=old_vocab_path,
old_vocab_size=old_vocab_size)
ws_util.warm_start(
ckpt_to_initialize_from=self.get_temp_dir(),
vars_to_warm_start=".*sc_vocab.*",
var_name_to_vocab_info={
"linear_model/sc_vocab/weights": vocab_info
})
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started. 'banana' isn't in the
# first two entries of the old vocabulary, so it's newly initialized.
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [[[1], [0]]]}, sess)
def testWarmStart_BucketizedColumn(self):
# Create feature column.
real = fc.numeric_column("real")
real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.])
# Save checkpoint from which to warm-start.
_, prev_bucket_val = self._create_prev_run_var(
"linear_model/real_bucketized/weights",
shape=[5, 1],
initializer=norms())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([real_bucket], partitioner)
sess.run(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars,
{real_bucket: [np.zeros([5, 1])]}, sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([real_bucket], partitioner)
ws_util.warm_start(
self.get_temp_dir(), vars_to_warm_start=".*real_bucketized.*")
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars,
{real_bucket: [prev_bucket_val]}, sess)
def testWarmStart_MultipleCols(self):
# Create vocab for sparse column "sc_vocab".
vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"vocab")
# Create feature columns.
sc_int = fc.categorical_column_with_identity("sc_int", num_buckets=10)
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
real = fc.numeric_column("real")
real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.])
cross = fc.crossed_column([sc_keys, sc_vocab], hash_bucket_size=20)
all_linear_cols = [sc_int, sc_hash, sc_keys, sc_vocab, real_bucket, cross]
# Save checkpoint from which to warm-start. Also create a bias variable,
# so we can check that it's also warm-started.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
sc_int_weights = variable_scope.get_variable(
"linear_model/sc_int/weights", shape=[10, 1], initializer=ones())
sc_hash_weights = variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
sc_keys_weights = variable_scope.get_variable(
"linear_model/sc_keys/weights", shape=[4, 1], initializer=rand())
sc_vocab_weights = variable_scope.get_variable(
"linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones())
real_bucket_weights = variable_scope.get_variable(
"linear_model/real_bucketized/weights",
shape=[5, 1],
initializer=norms())
cross_weights = variable_scope.get_variable(
"linear_model/sc_keys_X_sc_vocab/weights",
shape=[20, 1],
initializer=rand())
bias = variable_scope.get_variable(
"linear_model/bias_weights",
shape=[1],
initializer=rand())
self._write_checkpoint(sess)
(prev_int_val, prev_hash_val, prev_keys_val, prev_vocab_val,
prev_bucket_val, prev_cross_val, prev_bias_val) = sess.run([
sc_int_weights, sc_hash_weights, sc_keys_weights, sc_vocab_weights,
real_bucket_weights, cross_weights, bias
])
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols, partitioner)
sess.run(variables.global_variables_initializer())
# Without warm-starting, all weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {
sc_int: [np.zeros([10, 1])],
sc_hash: [np.zeros([15, 1])],
sc_keys: [np.zeros([4, 1])],
sc_vocab: [np.zeros([4, 1])],
real_bucket: [np.zeros([5, 1])],
cross: [np.zeros([20, 1])],
}, sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols, partitioner)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=vocab_path)
ws_util.warm_start(
self.get_temp_dir(),
var_name_to_vocab_info={
"linear_model/sc_vocab/weights": vocab_info
})
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {
sc_int: [prev_int_val],
sc_hash: [prev_hash_val],
sc_keys: [prev_keys_val],
sc_vocab: [prev_vocab_val],
real_bucket: [prev_bucket_val],
cross: [prev_cross_val],
"bias": [prev_bias_val],
}, sess)
def testWarmStartMoreSettings(self):
# Create old and new vocabs for sparse column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# Create feature columns.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
all_linear_cols = [sc_hash, sc_keys, sc_vocab]
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
sc_keys_weights = variable_scope.get_variable(
"some_other_name", shape=[4, 1], initializer=rand())
variable_scope.get_variable(
"linear_model/sc_vocab/weights",
initializer=[[0.5], [1.], [2.], [3.]])
self._write_checkpoint(sess)
prev_keys_val = sess.run(sc_keys_weights)
def _partitioner(shape, dtype): # pylint:disable=unused-argument
# Partition each var into 2 equal slices.
partitions = [1] * len(shape)
partitions[0] = min(2, shape[0].value)
return partitions
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols, _partitioner)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path)
ws_util.warm_start(
self.get_temp_dir(),
vars_to_warm_start=".*(sc_keys|sc_vocab).*",
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
},
var_name_to_prev_var_name={
ws_util._infer_var_name(cols_to_vars[sc_keys]):
"some_other_name"
})
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# sc_hash should not be warm-started. Var corresponding to sc_vocab
# should be correctly warm-started after vocab remapping.
self._assert_cols_to_vars(cols_to_vars, {
sc_keys:
np.split(prev_keys_val, 2),
sc_hash: [np.zeros([8, 1]), np.zeros([7, 1])],
sc_vocab: [
np.array([[3.], [2.], [1.]]),
np.array([[0.5], [0.], [0.]])
]
}, sess)
def testWarmStartMoreSettingsNoPartitioning(self):
# Create old and new vocabs for sparse column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# Create feature columns.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
all_linear_cols = [sc_hash, sc_keys, sc_vocab]
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
sc_keys_weights = variable_scope.get_variable(
"some_other_name", shape=[4, 1], initializer=rand())
variable_scope.get_variable(
"linear_model/sc_vocab/weights",
initializer=[[0.5], [1.], [2.], [3.]])
self._write_checkpoint(sess)
prev_keys_val = sess.run(sc_keys_weights)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols,
partitioner=None)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path)
ws_util.warm_start(
self.get_temp_dir(),
vars_to_warm_start=".*(sc_keys|sc_vocab).*",
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
},
var_name_to_prev_var_name={
ws_util._infer_var_name(cols_to_vars[sc_keys]):
"some_other_name"
})
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# sc_hash should not be warm-started. Var corresponding to sc_vocab
# should be correctly warm-started after vocab remapping.
self._assert_cols_to_vars(cols_to_vars, {
sc_keys: [prev_keys_val],
sc_hash: [np.zeros([15, 1])],
sc_vocab: [np.array([[3.], [2.], [1.], [0.5], [0.], [0.]])]
}, sess)
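  def _exampleVocabRemapForNoPartitioningTest(self):
    """Illustration only (hypothetical helper, not part of the original test).

    Reproduces with plain Python the row remapping that warm_start is
    expected to perform in testWarmStartMoreSettingsNoPartitioning: rows of
    the old weight matrix are looked up by token, and tokens missing from the
    old vocab fall back to zero rows because no backup initializer is given
    for linear weights.
    """
    old_vocab = ["apple", "banana", "guava", "orange"]
    old_rows = [[0.5], [1.], [2.], [3.]]
    new_vocab = ["orange", "guava", "banana", "apple", "raspberry",
                 "blueberry"]
    old_row_by_token = dict(zip(old_vocab, old_rows))
    remapped = [old_row_by_token.get(token, [0.]) for token in new_vocab]
    # remapped == [[3.], [2.], [1.], [0.5], [0.], [0.]], i.e. the sc_vocab
    # value asserted in the test above.
    return remapped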
def testWarmStartVarsToWarmstartIsNone(self):
# Create old and new vocabs for sparse column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# Create feature columns.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
all_linear_cols = [sc_hash, sc_keys, sc_vocab]
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
variable_scope.get_variable(
"some_other_name", shape=[4, 1], initializer=rand())
variable_scope.get_variable(
"linear_model/sc_vocab/weights",
initializer=[[0.5], [1.], [2.], [3.]])
self._write_checkpoint(sess)
def _partitioner(shape, dtype): # pylint:disable=unused-argument
# Partition each var into 2 equal slices.
partitions = [1] * len(shape)
partitions[0] = min(2, shape[0].value)
return partitions
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols, _partitioner)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path)
ws_util.warm_start(
self.get_temp_dir(),
# The special value of None here will ensure that only the variable
# specified in var_name_to_vocab_info (sc_vocab embedding) is
# warm-started.
vars_to_warm_start=None,
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
},
# Even though this is provided, the None value for
# vars_to_warm_start overrides the logic, and this will not be
# warm-started.
var_name_to_prev_var_name={
ws_util._infer_var_name(cols_to_vars[sc_keys]):
"some_other_name"
})
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# sc_vocab should be correctly warm-started after vocab remapping,
      # and neither of the other two should be warm-started.
self._assert_cols_to_vars(cols_to_vars, {
sc_keys: [np.zeros([2, 1]), np.zeros([2, 1])],
sc_hash: [np.zeros([8, 1]), np.zeros([7, 1])],
sc_vocab: [
np.array([[3.], [2.], [1.]]),
np.array([[0.5], [0.], [0.]])
]
}, sess)
def testWarmStartEmbeddingColumn(self):
# Create old and new vocabs for embedding column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry", "blueberry"],
"new_vocab")
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"input_layer/sc_vocab_embedding/embedding_weights",
initializer=[[0.5, 0.4], [1., 1.1], [2., 2.2], [3., 3.3]])
self._write_checkpoint(sess)
def _partitioner(shape, dtype): # pylint:disable=unused-argument
# Partition each var into 2 equal slices.
partitions = [1] * len(shape)
partitions[0] = min(2, shape[0].value)
return partitions
# Create feature columns.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
emb_vocab_column = fc.embedding_column(
categorical_column=sc_vocab,
dimension=2)
all_deep_cols = [emb_vocab_column]
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = {}
with variable_scope.variable_scope("", partitioner=_partitioner):
# Create the variables.
fc.input_layer(
features=self._create_dummy_inputs(),
feature_columns=all_deep_cols,
cols_to_vars=cols_to_vars)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path,
# Can't use constant_initializer with load_and_remap. In practice,
# use a truncated normal initializer.
backup_initializer=init_ops.random_uniform_initializer(
minval=0.42, maxval=0.42))
ws_util.warm_start(
self.get_temp_dir(),
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[emb_vocab_column]):
vocab_info
})
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# emb_vocab_column should be correctly warm-started after vocab
# remapping. Missing values are filled in with the EmbeddingColumn's
# initializer.
self._assert_cols_to_vars(
cols_to_vars, {
emb_vocab_column: [
np.array([[3., 3.3], [2., 2.2], [1., 1.1]]),
np.array([[0.5, 0.4], [0.42, 0.42], [0.42, 0.42]])
]
}, sess)
def testWarmStartEmbeddingColumnLinearModel(self):
# Create old and new vocabs for embedding column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry", "blueberry"],
"new_vocab")
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_vocab_embedding/embedding_weights",
initializer=[[0.5, 0.4], [1., 1.1], [2., 2.2], [3., 3.3]])
variable_scope.get_variable(
"linear_model/sc_vocab_embedding/weights",
initializer=[[0.69], [0.71]])
self._write_checkpoint(sess)
def _partitioner(shape, dtype): # pylint:disable=unused-argument
# Partition each var into 2 equal slices.
partitions = [1] * len(shape)
partitions[0] = min(2, shape[0].value)
return partitions
# Create feature columns.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
emb_vocab = fc.embedding_column(
categorical_column=sc_vocab,
dimension=2)
all_deep_cols = [emb_vocab]
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = {}
with variable_scope.variable_scope("", partitioner=_partitioner):
# Create the variables.
fc.linear_model(
features=self._create_dummy_inputs(),
feature_columns=all_deep_cols,
cols_to_vars=cols_to_vars)
# Construct the vocab_info for the embedding weight.
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path,
# Can't use constant_initializer with load_and_remap. In practice,
# use a truncated normal initializer.
backup_initializer=init_ops.random_uniform_initializer(
minval=0.42, maxval=0.42))
ws_util.warm_start(
self.get_temp_dir(),
vars_to_warm_start=".*sc_vocab.*",
var_name_to_vocab_info={
"linear_model/sc_vocab_embedding/embedding_weights": vocab_info
})
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# emb_vocab should be correctly warm-started after vocab remapping.
# Missing values are filled in with the EmbeddingColumn's initializer.
self._assert_cols_to_vars(
cols_to_vars,
{
emb_vocab: [
# linear weights part 0.
np.array([[0.69]]),
# linear weights part 1.
np.array([[0.71]]),
# embedding_weights part 0.
np.array([[3., 3.3], [2., 2.2], [1., 1.1]]),
# embedding_weights part 1.
np.array([[0.5, 0.4], [0.42, 0.42], [0.42, 0.42]])
]
},
sess)
def testErrorConditions(self):
x = variable_scope.get_variable(
"x",
shape=[4, 1],
initializer=ones(),
partitioner=lambda shape, dtype: [2, 1])
# List of PartitionedVariable is invalid type when warm-starting with vocab.
self.assertRaises(TypeError, ws_util._warm_start_var_with_vocab, [x],
"/tmp", 5, "/tmp", "/tmp")
# Unused variable names raises ValueError.
with ops.Graph().as_default():
with self.cached_session() as sess:
x = variable_scope.get_variable(
"x",
shape=[4, 1],
initializer=ones(),
partitioner=lambda shape, dtype: [2, 1])
self._write_checkpoint(sess)
self.assertRaises(
ValueError,
ws_util.warm_start,
self.get_temp_dir(),
var_name_to_vocab_info={"y": ws_util.VocabInfo("", 1, 0, "")})
self.assertRaises(
ValueError,
ws_util.warm_start,
self.get_temp_dir(),
var_name_to_prev_var_name={"y": "y2"})
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
2e5c9db597970c087922fe5fb4e821040099e528 | d92ce9a32bf20086e30701585a4e73c1f2469aff | /FunDooapp/virtualenv/bin/pyreverse | 4bb367f33b44c24c82bc7034882867f23526d633 | [] | no_license | Prem-chouhan/fellowshipProgram_PremsinghChouhan | f61cf4407458f14ef7eb6d80effb25f9592d2552 | 33e6b57f6c75a80d8a3d1f868d379e85365a1336 | refs/heads/master | 2020-09-14T12:45:16.269268 | 2019-12-23T14:24:10 | 2019-12-23T14:24:10 | 223,128,906 | 0 | 1 | null | 2020-07-22T11:50:46 | 2019-11-21T08:47:28 | Python | UTF-8 | Python | false | false | 271 | #!/home/admin-1/PycharmProjects/FunDooapp/virtualenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_pyreverse())
| [
"[email protected]"
] | ||
659d0ad0fba5b883dc88d5f5b01c4bb971bf6cc1 | 54d2887e3c910f68366bd0aab3c692d54245e22a | /abc/abc_126_211/abc143/c.py | a61192377b5409ade4b4e25ce7aba3578a87535b | [] | no_license | Kevinrobot34/atcoder | 7aec367fd2c6b589e9d583dae7b3c7520ce9fa12 | 482ea508f098f81e4f19522fe518dd22c781aca9 | refs/heads/master | 2022-07-10T23:44:45.290022 | 2022-06-29T11:30:26 | 2022-06-29T11:30:26 | 158,081,477 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | n = int(input())
s = input()
ans = 1
for i in range(1, n):
if s[i] != s[i-1]:
ans += 1
print(ans)
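# Note (added for clarity): the loop counts the number of maximal runs of
# equal adjacent characters in s. An equivalent formulation using the
# standard library (a sketch, not from the original submission) would be:
#   from itertools import groupby
#   ans = sum(1 for _ in groupby(s))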
| [
"[email protected]"
] | |
5eb470788942ba3791892cf184b8320ff59048b3 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/kusto/v20210101/get_database_principal_assignment.py | 6702cacfd4bb91dcca91097e194373ed4b8284f3 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,320 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetDatabasePrincipalAssignmentResult',
'AwaitableGetDatabasePrincipalAssignmentResult',
'get_database_principal_assignment',
'get_database_principal_assignment_output',
]
@pulumi.output_type
class GetDatabasePrincipalAssignmentResult:
"""
Class representing a database principal assignment.
"""
def __init__(__self__, id=None, name=None, principal_id=None, principal_name=None, principal_type=None, provisioning_state=None, role=None, tenant_id=None, tenant_name=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if principal_id and not isinstance(principal_id, str):
raise TypeError("Expected argument 'principal_id' to be a str")
pulumi.set(__self__, "principal_id", principal_id)
if principal_name and not isinstance(principal_name, str):
raise TypeError("Expected argument 'principal_name' to be a str")
pulumi.set(__self__, "principal_name", principal_name)
if principal_type and not isinstance(principal_type, str):
raise TypeError("Expected argument 'principal_type' to be a str")
pulumi.set(__self__, "principal_type", principal_type)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if role and not isinstance(role, str):
raise TypeError("Expected argument 'role' to be a str")
pulumi.set(__self__, "role", role)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if tenant_name and not isinstance(tenant_name, str):
raise TypeError("Expected argument 'tenant_name' to be a str")
pulumi.set(__self__, "tenant_name", tenant_name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The principal ID assigned to the database principal. It can be a user email, application ID, or security group name.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="principalName")
def principal_name(self) -> str:
"""
The principal name
"""
return pulumi.get(self, "principal_name")
@property
@pulumi.getter(name="principalType")
def principal_type(self) -> str:
"""
Principal type.
"""
return pulumi.get(self, "principal_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioned state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def role(self) -> str:
"""
Database principal role.
"""
return pulumi.get(self, "role")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[str]:
"""
The tenant id of the principal
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter(name="tenantName")
def tenant_name(self) -> str:
"""
The tenant name of the principal
"""
return pulumi.get(self, "tenant_name")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetDatabasePrincipalAssignmentResult(GetDatabasePrincipalAssignmentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDatabasePrincipalAssignmentResult(
id=self.id,
name=self.name,
principal_id=self.principal_id,
principal_name=self.principal_name,
principal_type=self.principal_type,
provisioning_state=self.provisioning_state,
role=self.role,
tenant_id=self.tenant_id,
tenant_name=self.tenant_name,
type=self.type)
def get_database_principal_assignment(cluster_name: Optional[str] = None,
database_name: Optional[str] = None,
principal_assignment_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabasePrincipalAssignmentResult:
"""
Class representing a database principal assignment.
:param str cluster_name: The name of the Kusto cluster.
:param str database_name: The name of the database in the Kusto cluster.
:param str principal_assignment_name: The name of the Kusto principalAssignment.
:param str resource_group_name: The name of the resource group containing the Kusto cluster.
"""
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['databaseName'] = database_name
__args__['principalAssignmentName'] = principal_assignment_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:kusto/v20210101:getDatabasePrincipalAssignment', __args__, opts=opts, typ=GetDatabasePrincipalAssignmentResult).value
return AwaitableGetDatabasePrincipalAssignmentResult(
id=__ret__.id,
name=__ret__.name,
principal_id=__ret__.principal_id,
principal_name=__ret__.principal_name,
principal_type=__ret__.principal_type,
provisioning_state=__ret__.provisioning_state,
role=__ret__.role,
tenant_id=__ret__.tenant_id,
tenant_name=__ret__.tenant_name,
type=__ret__.type)
@_utilities.lift_output_func(get_database_principal_assignment)
def get_database_principal_assignment_output(cluster_name: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
principal_assignment_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDatabasePrincipalAssignmentResult]:
"""
Class representing a database principal assignment.
:param str cluster_name: The name of the Kusto cluster.
:param str database_name: The name of the database in the Kusto cluster.
:param str principal_assignment_name: The name of the Kusto principalAssignment.
:param str resource_group_name: The name of the resource group containing the Kusto cluster.
"""
...
| [
"[email protected]"
] | |
b4f53d695d80feab5e9b69fa72d78e8512187c80 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_affiliations.py | 42384cf4a16916f7866a17a2fbb1a0dcf5f45e29 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py |
# class header
class _AFFILIATIONS():
    def __init__(self,):
        self.name = "AFFILIATIONS"
        self.definitions = ['affiliation']
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['affiliation']
| [
"[email protected]"
] | |
fed4bdd2449e04bd15775b80ce99c2bf71bc3df3 | bbe447a740929eaee1955bd9c1517cf760dd5cb9 | /aralib/adwordsApi/examples/v201003/add_negative_campaign_criterion.py | 54e653048ddbd0762277bfa7d7600e8aed50b548 | [
"Apache-2.0"
] | permissive | MujaahidSalie/aranciulla | f3d32e7dd68ecfca620fe4d3bf22ecb4762f5893 | 34197dfbdb01479f288611a0cb700e925c4e56ce | refs/heads/master | 2020-09-07T02:16:25.261598 | 2011-11-01T21:20:46 | 2011-11-01T21:20:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,065 | py | #!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This example creates new negative campaign criterion. To create campaign, run
add_campaign.py.
Tags: CampaignCriterionService.mutate
"""
__author__ = '[email protected] (Stan Grinberg)'
import sys
sys.path.append('../..')
# Import appropriate classes from the client library.
from aw_api.Client import Client
# Initialize client object.
client = Client(path='../..')
# Initialize appropriate service.
campaign_criterion_service = client.GetCampaignCriterionService(
    'https://adwords-sandbox.google.com', 'v201003')
# Construct campaign criterion object and add negative campaign criterion.
campaign_id = 'INSERT_CAMPAIGN_ID_HERE'
operations = [{
    'operator': 'ADD',
    'operand': {
        'type': 'NegativeCampaignCriterion',
        'campaignId': campaign_id,
        'criterion': {
            'type': 'Keyword',
            'matchType': 'BROAD',
            'text': 'jupiter cruise'
        }
    }
}]
campaign_criterion = campaign_criterion_service.Mutate(
    operations)[0]['value'][0]
# Display results.
print ('New negative campaign criterion with \'%s\' id and \'%s\' text was '
       'successfully added to \'%s\' campaign.'
       % (campaign_criterion['criterion']['id'],
          campaign_criterion['criterion']['text'],
          campaign_criterion['campaignId']))
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
                                           client.GetOperations()))
| [
"[email protected]"
] | |
d90727f571830e06611f1580efa36793cde8b63e | b08bddd99d49ff242aa890b491cbbdf09ce128f0 | /apps/login_app/migrations/0004_auto_20170823_1152.py | 3226750b3561b5ea2af2985e960273767eed143f | [] | no_license | HollinRoberts/friends | 24b99c031a7771ad1b35a22112658f01fe3d8090 | ae22c690f6800c74b6f794f44eefd97b607d008a | refs/heads/master | 2021-01-20T05:28:42.381205 | 2017-08-25T22:41:21 | 2017-08-25T22:41:21 | 101,447,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-23 18:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login_app', '0003_auto_20170823_1148'),
]
operations = [
migrations.AddField(
model_name='poke',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='poke',
name='updated_at',
field=models.DateTimeField(auto_now=True, null=True),
),
]
| [
"[email protected]"
] | |
c4f9a57a58113e650a9ac005d75441afa0d6d22e | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/stacksNqueues_20200722084452.py | d0552e00c0ca213a09d1eebd1be5b92128dad478 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | # we'll use a list to rep stack and a queue
# empty list
# stack is last in first out
stack = []
stack.append(1)
stack.append(2)
stack.append(3)
stack.append(4)
# remove item from stack(pop)
x = stack.pop()
from collections import deque
# create empty deque
queue = deque()
queue.append(1)
queue.append(2)
queue.append(3)
queue.append(4)
print(queue)
# remove elements from the front of the list
# Queue is first i f
y = queue.popleft()
print(y)
print(queue)
| [
"[email protected]"
] | |
ca9694bf200c7f1b74e093a94f8a7fb7b3f38eb3 | 94f156b362fbce8f89c8e15cd7687f8af267ef08 | /endterm/main/permissions.py | fa03d8dfbbb352fd4d500b5d47d7396758ac8649 | [] | no_license | DastanB/AdvancedDjango | 6eee5477cd5a00423972c9cc3d2b5f1e4a501841 | 2b5d4c22b278c6d0e08ab7e84161163fe42e9a3f | refs/heads/master | 2020-07-17T19:21:16.271964 | 2019-12-03T21:58:51 | 2019-12-03T21:58:51 | 206,081,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | from rest_framework.permissions import IsAuthenticated, BasePermission
from django.contrib.auth.models import User
class ArticlePermission(BasePermission):
message = 'You must be the owner of the project.'
def has_permission(self, request, view):
return True
def has_object_permission(self, request, view, obj):
        if view.action != 'list':
return request.user == obj.creator | [
"[email protected]"
] | |
27612de0eb84d3c9a15217b1cf2ccef7a2e61e91 | 95df12156e4dd24ed3646a93da972ab1a8e654f5 | /propmix/hpiraw/hpiraw_api_server/hpiraw/dbauth.py | 046ff02ca76ec3638b865c08fa7bf60437ba96d4 | [] | no_license | sijuaugustin/ingts | 1cf05e9acaac85181f82b8442537755a7799e300 | 68df567caa7c581e89eea7130fa8a45cd83a40ae | refs/heads/master | 2020-06-01T15:49:23.620893 | 2017-06-13T06:56:27 | 2017-06-13T06:56:27 | 94,078,907 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | '''
Created on Jan 6, 2017
@author: joseph
'''
DATABASE_ACCESS = {'name': 'hpi_api',
'password': 'hpi@!23',
'source': 'cognubauth'
}
| [
"[email protected]"
] | |
0cec4c494c0c1a152e35437198c2a8608035f010 | fe91ffa11707887e4cdddde8f386a8c8e724aa58 | /tools/binary_size/libsupersize/archive.py | be5db59f11a736cac8d20617911281efc0558082 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later",
"Zlib",
"LGPL-2.1-only",
"APSL-2.0",
"Apache-2.0",
"LGPL-2.0-only",
"MIT",
"LicenseRef-scancode-unknown"
] | permissive | akshaymarch7/chromium | 78baac2b45526031846ccbaeca96c639d1d60ace | d273c844a313b1e527dec0d59ce70c95fd2bd458 | refs/heads/master | 2023-02-26T23:48:03.686055 | 2020-04-15T01:20:07 | 2020-04-15T01:20:07 | 255,778,651 | 2 | 1 | BSD-3-Clause | 2020-04-15T02:04:56 | 2020-04-15T02:04:55 | null | UTF-8 | Python | false | false | 79,014 | py | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Main Python API for analyzing binary size."""
import argparse
import bisect
import calendar
import collections
import datetime
import gzip
import itertools
import logging
import os
import posixpath
import re
import string
import subprocess
import sys
import zipfile
import zlib
import apkanalyzer
import ar
import demangle
import describe
import file_format
import function_signature
import linker_map_parser
import models
import ninja_parser
import nm
import obj_analyzer
import parallel
import path_util
import string_extract
import zip_util
sys.path.insert(1, os.path.join(path_util.SRC_ROOT, 'tools', 'grit'))
from grit.format import data_pack
_OWNERS_FILENAME = 'OWNERS'
_COMPONENT_REGEX = re.compile(r'\s*#\s*COMPONENT\s*:\s*(\S+)')
_FILE_PATH_REGEX = re.compile(r'\s*file://(\S+)')
_UNCOMPRESSED_COMPRESSION_RATIO_THRESHOLD = 0.9
_APKS_MAIN_APK = 'splits/base-master.apk'
# Holds computation state that is live only when an output directory exists.
_OutputDirectoryContext = collections.namedtuple('_OutputDirectoryContext', [
'elf_object_paths', # Only when elf_path is also provided.
'known_inputs', # Only when elf_path is also provided.
'output_directory',
'source_mapper',
'thin_archives',
])
# When ensuring matching section sizes between .elf and .map files, these
# sections should be ignored. When lld creates a combined library with
# partitions, some sections (like .text) exist in each partition, but the ones
# below are common. At library splitting time, llvm-objcopy pulls what's needed
# from these sections into the new libraries. Hence, the ELF sections will end
# up smaller than the combined .map file sections.
_SECTION_SIZE_BLACKLIST = ['.symtab', '.shstrtab', '.strtab']
# Tunable "knobs" for CreateSectionSizesAndSymbols().
class SectionSizeKnobs(object):
def __init__(self, is_bundle=False):
# A limit on the number of symbols an address can have, before these symbols
# are compacted into shared symbols. Increasing this value causes more data
# to be stored .size files, but is also more expensive.
# Effect of max_same_name_alias_count (as of Oct 2017, with min_pss = max):
# 1: shared .text syms = 1772874 bytes, file size = 9.43MiB (645476 syms).
# 2: shared .text syms = 1065654 bytes, file size = 9.58MiB (669952 syms).
# 6: shared .text syms = 464058 bytes, file size = 10.11MiB (782693 syms).
# 10: shared .text syms = 365648 bytes, file size = 10.24MiB (813758 syms).
# 20: shared .text syms = 86202 bytes, file size = 10.38MiB (854548 syms).
# 40: shared .text syms = 48424 bytes, file size = 10.50MiB (890396 syms).
# 50: shared .text syms = 41860 bytes, file size = 10.54MiB (902304 syms).
# max: shared .text syms = 0 bytes, file size = 11.10MiB (1235449 syms).
    self.max_same_name_alias_count = 40  # 50kb is basically negligible.
# An estimate of pak translation compression ratio to make comparisons
    # between .size files reasonable. Otherwise this can differ with every
    # pak change.
self.pak_compression_ratio = 0.38 if is_bundle else 0.33
# File name: Source file.
self.apk_other_files = {
'assets/icudtl.dat': '../../third_party/icu/android/icudtl.dat',
'assets/snapshot_blob_32.bin': '../../v8/snapshot_blob_32.bin',
'assets/snapshot_blob_64.bin': '../../v8/snapshot_blob_64.bin',
'assets/unwind_cfi_32': '../../base/trace_event/cfi_backtrace_android.cc',
'assets/webapk_dex_version.txt': (
'../../chrome/android/webapk/libs/runtime_library_version.gni'),
'lib/armeabi-v7a/libarcore_sdk_c_minimal.so': (
'../../third_party/arcore-android-sdk'),
'lib/armeabi-v7a/libarcore_sdk_c.so': (
'../../third_party/arcore-android-sdk'),
'lib/armeabi-v7a/libcrashpad_handler_trampoline.so': (
'../../third_party/crashpad/libcrashpad_handler_trampoline.so'),
'lib/arm64-v8a/libcrashpad_handler_trampoline.so': (
'../../third_party/crashpad/libcrashpad_handler_trampoline.so'),
}
self.apk_expected_other_files = {
# From Monochrome.apk
'AndroidManifest.xml',
'resources.arsc',
'assets/AndroidManifest.xml',
'assets/metaresources.arsc',
'META-INF/CERT.SF',
'META-INF/CERT.RSA',
'META-INF/CHROMIUM.SF',
'META-INF/CHROMIUM.RSA',
'META-INF/MANIFEST.MF',
}
self.analyze_java = True
self.analyze_native = True
self.src_root = path_util.SRC_ROOT
# Whether to count number of relative relocations instead of binary size
self.relocations_mode = False
def ModifyWithArgs(self, args):
if args.source_directory:
self.src_root = args.source_directory
if args.java_only:
self.analyze_java = True
self.analyze_native = False
if args.native_only:
self.analyze_java = False
self.analyze_native = True
if args.no_java:
self.analyze_java = False
if args.no_native:
self.analyze_native = False
if args.relocations:
self.relocations_mode = True
self.analyze_java = False
def _OpenMaybeGzAsText(path):
"""Calls `gzip.open()` if |path| ends in ".gz", otherwise calls `open()`."""
if path.endswith('.gz'):
return gzip.open(path, 'rt')
return open(path, 'rt')
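# Illustrative usage (the path below is hypothetical): callers can treat
# plain-text and gzipped linker maps uniformly, e.g.:
#   with _OpenMaybeGzAsText('out/Release/libchrome.so.map.gz') as f:
#     header = f.readline()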
def _NormalizeNames(raw_symbols):
"""Ensures that all names are formatted in a useful way.
This includes:
- Deriving |name| and |template_name| from |full_name|.
- Stripping of return types (for functions).
- Moving "vtable for" and the like to be suffixes rather than prefixes.
"""
found_prefixes = set()
for symbol in raw_symbols:
full_name = symbol.full_name
# See comment in _CalculatePadding() about when this can happen. Don't
# process names for non-native sections.
if symbol.IsPak():
# full_name: "about_ui_resources.grdp: IDR_ABOUT_UI_CREDITS_HTML".
space_idx = full_name.rindex(' ')
name = full_name[space_idx + 1:]
symbol.template_name = name
symbol.name = name
elif (full_name.startswith('*') or
symbol.IsOverhead() or
symbol.IsOther()):
symbol.template_name = full_name
symbol.name = full_name
elif symbol.IsDex():
symbol.full_name, symbol.template_name, symbol.name = (
function_signature.ParseJava(full_name))
elif symbol.IsStringLiteral():
symbol.full_name = full_name
symbol.template_name = full_name
symbol.name = full_name
elif symbol.IsNative():
# Remove [clone] suffix, and set flag accordingly.
# Search from left-to-right, as multiple [clone]s can exist.
# Example name suffixes:
# [clone .part.322] # GCC
# [clone .isra.322] # GCC
# [clone .constprop.1064] # GCC
# [clone .11064] # clang
# http://unix.stackexchange.com/questions/223013/function-symbol-gets-part-suffix-after-compilation
idx = full_name.find(' [clone ')
if idx != -1:
full_name = full_name[:idx]
symbol.flags |= models.FLAG_CLONE
# Clones for C symbols.
if symbol.section == 't':
idx = full_name.rfind('.')
if idx != -1 and full_name[idx + 1:].isdigit():
new_name = full_name[:idx]
# Generated symbols that end with .123 but are not clones.
# Find these via:
# size_info.symbols.WhereInSection('t').WhereIsGroup().SortedByCount()
if new_name not in ('__tcf_0', 'startup'):
full_name = new_name
symbol.flags |= models.FLAG_CLONE
# Remove .part / .isra / .constprop.
idx = full_name.rfind('.', 0, idx)
if idx != -1:
full_name = full_name[:idx]
# E.g.: vtable for FOO
idx = full_name.find(' for ', 0, 30)
if idx != -1:
found_prefixes.add(full_name[:idx + 4])
full_name = '{} [{}]'.format(full_name[idx + 5:], full_name[:idx])
# E.g.: virtual thunk to FOO
idx = full_name.find(' to ', 0, 30)
if idx != -1:
found_prefixes.add(full_name[:idx + 3])
full_name = '{} [{}]'.format(full_name[idx + 4:], full_name[:idx])
# Strip out return type, and split out name, template_name.
# Function parsing also applies to non-text symbols.
# E.g. Function statics.
symbol.full_name, symbol.template_name, symbol.name = (
function_signature.Parse(full_name))
# Remove anonymous namespaces (they just harm clustering).
symbol.template_name = symbol.template_name.replace(
'(anonymous namespace)::', '')
symbol.full_name = symbol.full_name.replace(
'(anonymous namespace)::', '')
non_anonymous_name = symbol.name.replace('(anonymous namespace)::', '')
if symbol.name != non_anonymous_name:
symbol.flags |= models.FLAG_ANONYMOUS
symbol.name = non_anonymous_name
# Allow using "is" to compare names (and should help with RAM). This applies
# to all symbols.
function_signature.InternSameNames(symbol)
logging.debug('Found name prefixes of: %r', found_prefixes)
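# E.g. (illustrative): for native symbols, "vtable for Foo" is rewritten as
# "Foo [vtable]" and "virtual thunk to Bar()" as "Bar() [virtual thunk]", so
# symbols cluster by class/function name rather than by the prefix.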
def _NormalizeObjectPath(path):
"""Normalizes object paths.
Prefixes are removed: obj/, ../../
Archive names made more pathy: foo/bar.a(baz.o) -> foo/bar.a/baz.o
"""
if path.startswith('obj/'):
# Convert obj/third_party/... -> third_party/...
path = path[4:]
elif path.startswith('../../'):
# Convert ../../third_party/... -> third_party/...
path = path[6:]
if path.endswith(')'):
# Convert foo/bar.a(baz.o) -> foo/bar.a/baz.o so that hierarchical
# breakdowns consider the .o part to be a separate node.
start_idx = path.rindex('(')
path = os.path.join(path[:start_idx], path[start_idx + 1:-1])
return path
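def _ExampleNormalizedObjectPaths():
  """Illustration only; not called by the tool.

  The paths below are hypothetical and simply exercise the rules documented
  in _NormalizeObjectPath(); the asserts hold on POSIX path separators.
  """
  assert _NormalizeObjectPath('obj/third_party/foo/foo.o') == (
      'third_party/foo/foo.o')
  assert _NormalizeObjectPath('../../third_party/foo/foo.o') == (
      'third_party/foo/foo.o')
  assert _NormalizeObjectPath('obj/base/libbase.a(trace.o)') == (
      'base/libbase.a/trace.o')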
def _NormalizeSourcePath(path):
"""Returns (is_generated, normalized_path)"""
if path.startswith('gen/'):
# Convert gen/third_party/... -> third_party/...
return True, path[4:]
if path.startswith('../../'):
# Convert ../../third_party/... -> third_party/...
return False, path[6:]
return True, path
def _ExtractSourcePathsAndNormalizeObjectPaths(raw_symbols, source_mapper):
"""Fills in the |source_path| attribute and normalizes |object_path|."""
if source_mapper:
logging.info('Looking up source paths from ninja files')
for symbol in raw_symbols:
object_path = symbol.object_path
if symbol.IsDex() or symbol.IsOther():
if symbol.source_path:
symbol.generated_source, symbol.source_path = _NormalizeSourcePath(
symbol.source_path)
elif object_path:
# We don't have source info for prebuilt .a files.
if not os.path.isabs(object_path) and not object_path.startswith('..'):
source_path = source_mapper.FindSourceForPath(object_path)
if source_path:
symbol.generated_source, symbol.source_path = (
_NormalizeSourcePath(source_path))
symbol.object_path = _NormalizeObjectPath(object_path)
assert source_mapper.unmatched_paths_count == 0, (
'One or more source file paths could not be found. Likely caused by '
'.ninja files being generated at a different time than the .map file.')
else:
logging.info('Normalizing object paths')
for symbol in raw_symbols:
if symbol.object_path:
symbol.object_path = _NormalizeObjectPath(symbol.object_path)
def _ComputeAncestorPath(path_list, symbol_count):
"""Returns the common ancestor of the given paths."""
if not path_list:
return ''
prefix = os.path.commonprefix(path_list)
# Check if all paths were the same.
if prefix == path_list[0]:
return prefix
# Put in buckets to cut down on the number of unique paths.
if symbol_count >= 100:
symbol_count_str = '100+'
elif symbol_count >= 50:
symbol_count_str = '50-99'
elif symbol_count >= 20:
symbol_count_str = '20-49'
elif symbol_count >= 10:
symbol_count_str = '10-19'
else:
symbol_count_str = str(symbol_count)
# Put the path count as a subdirectory so that grouping by path will show
# "{shared}" as a bucket, and the symbol counts as leafs.
if not prefix:
return os.path.join('{shared}', symbol_count_str)
return os.path.join(os.path.dirname(prefix), '{shared}', symbol_count_str)
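# Worked example (illustrative paths): for three aliases at
#   ['third_party/a/x.cc', 'third_party/a/y.cc', 'third_party/b/z.cc']
# the common prefix is 'third_party/', which differs from the first path, so
# with symbol_count=3 the result is 'third_party/{shared}/3'; with
# symbol_count=25 it would be 'third_party/{shared}/20-49'.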
def _CompactLargeAliasesIntoSharedSymbols(raw_symbols, knobs):
"""Converts symbols with large number of aliases into single symbols.
The merged symbol's path fields are changed to common-ancestor paths in
the form: common/dir/{shared}/$SYMBOL_COUNT
Assumes aliases differ only by path (not by name).
"""
num_raw_symbols = len(raw_symbols)
num_shared_symbols = 0
src_cursor = 0
dst_cursor = 0
while src_cursor < num_raw_symbols:
symbol = raw_symbols[src_cursor]
raw_symbols[dst_cursor] = symbol
dst_cursor += 1
aliases = symbol.aliases
if aliases and len(aliases) > knobs.max_same_name_alias_count:
symbol.source_path = _ComputeAncestorPath(
[s.source_path for s in aliases if s.source_path], len(aliases))
symbol.object_path = _ComputeAncestorPath(
[s.object_path for s in aliases if s.object_path], len(aliases))
symbol.generated_source = all(s.generated_source for s in aliases)
symbol.aliases = None
num_shared_symbols += 1
src_cursor += len(aliases)
else:
src_cursor += 1
raw_symbols[dst_cursor:] = []
num_removed = src_cursor - dst_cursor
logging.debug('Converted %d aliases into %d shared-path symbols',
num_removed, num_shared_symbols)
def _ConnectNmAliases(raw_symbols):
"""Ensures |aliases| is set correctly for all symbols."""
prev_sym = raw_symbols[0]
for sym in raw_symbols[1:]:
# Don't merge bss symbols.
if sym.address > 0 and prev_sym.address == sym.address:
# Don't merge padding-only symbols (** symbol gaps).
if prev_sym.size > 0:
# Don't merge if already merged.
if prev_sym.aliases is None or prev_sym.aliases is not sym.aliases:
if prev_sym.aliases:
prev_sym.aliases.append(sym)
else:
prev_sym.aliases = [prev_sym, sym]
sym.aliases = prev_sym.aliases
prev_sym = sym
def _AssignNmAliasPathsAndCreatePathAliases(raw_symbols, object_paths_by_name):
num_found_paths = 0
num_unknown_names = 0
num_path_mismatches = 0
num_aliases_created = 0
ret = []
for symbol in raw_symbols:
ret.append(symbol)
full_name = symbol.full_name
# '__typeid_' symbols appear in linker .map only, and not nm output.
if full_name.startswith('__typeid_'):
if object_paths_by_name.get(full_name):
logging.warning('Found unexpected __typeid_ symbol in nm output: %s',
full_name)
continue
# Don't skip if symbol.IsBss(). This is needed for LLD-LTO to work, since
# .bss object_path data are unavailable for linker_map_parser, and need to
# be extracted here. For regular LLD flow, incorrect aliased symbols can
# arise. But that's a lesser evil compared to having LLD-LTO .bss missing
# object_path and source_path.
# TODO(huangs): Fix aliased symbols for the LLD case.
if (symbol.IsStringLiteral() or
not full_name or
full_name[0] in '*.' or # e.g. ** merge symbols, .Lswitch.table
full_name == 'startup'):
continue
object_paths = object_paths_by_name.get(full_name)
if object_paths:
num_found_paths += 1
else:
if num_unknown_names < 10:
logging.warning('Symbol not found in any .o files: %r', symbol)
num_unknown_names += 1
continue
if symbol.object_path and symbol.object_path not in object_paths:
if num_path_mismatches < 10:
logging.warning('Symbol path reported by .map not found by nm.')
logging.warning('sym=%r', symbol)
logging.warning('paths=%r', object_paths)
object_paths.append(symbol.object_path)
object_paths.sort()
num_path_mismatches += 1
symbol.object_path = object_paths[0]
if len(object_paths) > 1:
# Create one symbol for each object_path.
aliases = symbol.aliases or [symbol]
symbol.aliases = aliases
num_aliases_created += len(object_paths) - 1
for object_path in object_paths[1:]:
new_sym = models.Symbol(
symbol.section_name, symbol.size, address=symbol.address,
full_name=full_name, object_path=object_path, aliases=aliases)
aliases.append(new_sym)
ret.append(new_sym)
logging.debug('Cross-referenced %d symbols with nm output. '
'num_unknown_names=%d num_path_mismatches=%d '
'num_aliases_created=%d',
num_found_paths, num_unknown_names, num_path_mismatches,
num_aliases_created)
return ret
def _DiscoverMissedObjectPaths(raw_symbols, known_inputs):
# Missing object paths are caused by .a files added by -l flags, which are not
# listed as explicit inputs within .ninja rules.
missed_inputs = set()
for symbol in raw_symbols:
path = symbol.object_path
if path.endswith(')'):
# Convert foo/bar.a(baz.o) -> foo/bar.a
path = path[:path.rindex('(')]
if path and path not in known_inputs:
missed_inputs.add(path)
return missed_inputs
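# E.g. (illustrative): a symbol whose object_path is 'base/libfoo.a(bar.o)'
# is reduced to 'base/libfoo.a'; if that archive is not among |known_inputs|
# (typical for libraries pulled in via -l flags), it is returned so it can
# still be analyzed.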
def _CreateMergeStringsReplacements(merge_string_syms,
list_of_positions_by_object_path):
"""Creates replacement symbols for |merge_syms|."""
ret = []
STRING_LITERAL_NAME = models.STRING_LITERAL_NAME
assert len(merge_string_syms) == len(list_of_positions_by_object_path)
tups = zip(merge_string_syms, list_of_positions_by_object_path)
for merge_sym, positions_by_object_path in tups:
merge_sym_address = merge_sym.address
new_symbols = []
ret.append(new_symbols)
for object_path, positions in positions_by_object_path.items():
for offset, size in positions:
address = merge_sym_address + offset
symbol = models.Symbol(
models.SECTION_RODATA,
size,
address=address,
full_name=STRING_LITERAL_NAME,
object_path=object_path)
new_symbols.append(symbol)
logging.debug('Created %d string literal symbols', sum(len(x) for x in ret))
logging.debug('Sorting string literals')
for symbols in ret:
# In order to achieve a total ordering in the presence of aliases, need to
# include both |address| and |object_path|.
# In order to achieve consistent deduping, need to include |size|.
symbols.sort(key=lambda x: (x.address, -x.size, x.object_path))
logging.debug('Deduping string literals')
num_removed = 0
size_removed = 0
num_aliases = 0
for i, symbols in enumerate(ret):
if not symbols:
continue
prev_symbol = symbols[0]
new_symbols = [prev_symbol]
for symbol in symbols[1:]:
padding = symbol.address - prev_symbol.end_address
if (prev_symbol.address == symbol.address and
prev_symbol.size == symbol.size):
# String is an alias.
num_aliases += 1
aliases = prev_symbol.aliases
if aliases:
aliases.append(symbol)
symbol.aliases = aliases
else:
aliases = [prev_symbol, symbol]
prev_symbol.aliases = aliases
symbol.aliases = aliases
elif padding + symbol.size <= 0:
# String is a substring of prior one.
num_removed += 1
size_removed += symbol.size
continue
elif padding < 0:
# String overlaps previous one. Adjust to not overlap.
symbol.address -= padding
symbol.size += padding
new_symbols.append(symbol)
prev_symbol = symbol
ret[i] = new_symbols
# Aliases come out in random order, so sort to be deterministic.
ret[i].sort(key=lambda s: (s.address, s.object_path))
logging.debug(
'Removed %d overlapping string literals (%d bytes) & created %d aliases',
num_removed, size_removed, num_aliases)
return ret
def _ParseComponentFromOwners(filename):
"""Searches an OWNERS file for lines that start with `# COMPONENT:`.
If an OWNERS file has no COMPONENT but references another OWNERS file, follow
the reference and check that file instead.
Args:
filename: Path to the file to parse.
Returns:
The text that follows the `# COMPONENT:` prefix, such as 'component>name'.
Empty string if no component found or the file didn't exist.
"""
reference_paths = []
try:
with open(filename) as f:
for line in f:
component_matches = _COMPONENT_REGEX.match(line)
path_matches = _FILE_PATH_REGEX.match(line)
if component_matches:
return component_matches.group(1)
elif path_matches:
reference_paths.append(path_matches.group(1))
except IOError:
return ''
if len(reference_paths) == 1:
newpath = os.path.join(path_util.SRC_ROOT, reference_paths[0])
return _ParseComponentFromOwners(newpath)
else:
return ''
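# Illustrative example (file contents are hypothetical): an OWNERS file with
#   # COMPONENT: Internals>Network
# yields 'Internals>Network', while an OWNERS file containing only
#   file://net/OWNERS
# defers to net/OWNERS and returns whatever COMPONENT (if any) is found
# there. A missing or component-less file yields ''.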
def _FindComponentRoot(start_path, cache, knobs):
"""Searches all parent directories for COMPONENT in OWNERS files.
Args:
start_path: Path of directory to start searching from. Must be relative to
SRC_ROOT.
cache: Dict of OWNERS paths. Used instead of filesystem if paths are present
in the dict.
knobs: Instance of SectionSizeKnobs with tunable knobs and options.
Returns:
COMPONENT belonging to |start_path|, or empty string if not found.
"""
prev_dir = None
test_dir = start_path
# This loop will traverse the directory structure upwards until reaching
# SRC_ROOT, where test_dir and prev_dir will both equal an empty string.
while test_dir != prev_dir:
cached_component = cache.get(test_dir)
if cached_component:
return cached_component
elif cached_component is None:
owners_path = os.path.join(knobs.src_root, test_dir, _OWNERS_FILENAME)
component = _ParseComponentFromOwners(owners_path)
cache[test_dir] = component
if component:
return component
prev_dir = test_dir
test_dir = os.path.dirname(test_dir)
return ''
def _PopulateComponents(raw_symbols, knobs):
"""Populates the |component| field based on |source_path|.
Symbols without a |source_path| are skipped.
Args:
raw_symbols: list of Symbol objects.
knobs: Instance of SectionSizeKnobs. Tunable knobs and options.
"""
seen_paths = {}
for symbol in raw_symbols:
if symbol.source_path:
folder_path = os.path.dirname(symbol.source_path)
symbol.component = _FindComponentRoot(folder_path, seen_paths, knobs)
def _UpdateSymbolNamesFromNm(raw_symbols, names_by_address):
"""Updates raw_symbols names with extra information from nm."""
logging.debug('Update symbol names')
# linker_map_parser extracts '** outlined function' without knowing how many
# such symbols exist at each address. nm has this information, and stores the
# value as, e.g., '** outlined function * 5'. Copy the information over.
for s in raw_symbols:
if s.full_name.startswith('** outlined function'):
name_list = names_by_address.get(s.address)
if name_list:
for name in name_list:
if name.startswith('** outlined function'):
s.full_name = name
break
def _AddNmAliases(raw_symbols, names_by_address):
"""Adds symbols that were removed by identical code folding."""
# Step 1: Create list of (index_of_symbol, name_list).
logging.debug('Creating alias list')
replacements = []
num_new_symbols = 0
missing_names = collections.defaultdict(list)
for i, s in enumerate(raw_symbols):
# Don't alias padding-only symbols (e.g. ** symbol gap)
if s.size_without_padding == 0:
continue
# Also skip artificial symbols that won't appear in nm output.
if s.full_name.startswith('** CFI jump table'):
continue
name_list = names_by_address.get(s.address)
if name_list:
if s.full_name not in name_list:
missing_names[s.full_name].append(s.address)
logging.warning('Name missing from aliases: %08x %s %s', s.address,
s.full_name, name_list)
continue
replacements.append((i, name_list))
num_new_symbols += len(name_list) - 1
if missing_names and logging.getLogger().isEnabledFor(logging.INFO):
for address, names in names_by_address.items():
for name in names:
if name in missing_names:
logging.info('Missing name %s is at address %x instead of [%s]' %
(name, address, ','.join('%x' % a for a in missing_names[name])))
if float(num_new_symbols) / len(raw_symbols) < .05:
logging.warning('Number of aliases is oddly low (%.0f%%). It should '
'usually be around 25%%. Ensure --tool-prefix is correct. ',
float(num_new_symbols) / len(raw_symbols) * 100)
# Step 2: Create new symbols as siblings to each existing one.
logging.debug('Creating %d new symbols from nm output', num_new_symbols)
expected_num_symbols = len(raw_symbols) + num_new_symbols
ret = []
prev_src = 0
for cur_src, name_list in replacements:
ret += raw_symbols[prev_src:cur_src]
prev_src = cur_src + 1
sym = raw_symbols[cur_src]
# Create symbols (|sym| gets recreated and discarded).
new_syms = []
for full_name in name_list:
# Do not set |aliases| in order to avoid being pruned by
# _CompactLargeAliasesIntoSharedSymbols(), which assumes aliases differ
# only by path. The field will be set afterwards by _ConnectNmAliases().
new_syms.append(models.Symbol(
sym.section_name, sym.size, address=sym.address, full_name=full_name))
ret += new_syms
ret += raw_symbols[prev_src:]
assert expected_num_symbols == len(ret)
return ret
def LoadAndPostProcessSizeInfo(path, file_obj=None):
"""Returns a SizeInfo for the given |path|."""
logging.debug('Loading results from: %s', path)
size_info = file_format.LoadSizeInfo(path, file_obj=file_obj)
logging.info('Normalizing symbol names')
_NormalizeNames(size_info.raw_symbols)
logging.info('Loaded %d symbols', len(size_info.raw_symbols))
return size_info
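# Illustrative usage (the filename is hypothetical):
#   size_info = LoadAndPostProcessSizeInfo('ChromePublic.size')
#   print(len(size_info.raw_symbols))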
def _CollectModuleSizes(minimal_apks_path):
sizes_by_module = collections.defaultdict(int)
with zipfile.ZipFile(minimal_apks_path) as z:
for info in z.infolist():
# E.g.:
# splits/base-master.apk
# splits/base-en.apk
# splits/vr-master.apk
# splits/vr-en.apk
# TODO(agrieve): Might be worth measuring a non-en locale as well.
m = re.match(r'splits/(.*)-master\.apk', info.filename)
if m:
sizes_by_module[m.group(1)] += info.file_size
return sizes_by_module
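# E.g. (illustrative .apks contents): 'splits/base-master.apk' and
# 'splits/base-en.apk' both belong to module 'base', but only the -master
# split matches the regex and is counted; 'splits/vr-master.apk' contributes
# to module 'vr'.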
def _ExtendSectionRange(section_range_by_name, section_name, delta_size):
(prev_address, prev_size) = section_range_by_name.get(section_name, (0, 0))
section_range_by_name[section_name] = (prev_address, prev_size + delta_size)
def CreateMetadata(map_path, elf_path, apk_path, minimal_apks_path,
tool_prefix, output_directory, linker_name):
"""Creates metadata dict.
Args:
map_path: Path to the linker .map(.gz) file to parse.
elf_path: Path to the corresponding unstripped ELF file. Used to find symbol
aliases and inlined functions. Can be None.
apk_path: Path to the .apk file to measure.
minimal_apks_path: Path to the .minimal.apks file to measure.
tool_prefix: Prefix for c++filt & nm.
output_directory: Build output directory.
linker_name: A coded linker name (see linker_map_parser.py).
Returns:
None if |elf_path| is not supplied. Otherwise returns dict mapping string
constants to values.
If |elf_path| is supplied, git revision and elf info are included.
If |output_directory| is also supplied, then filenames will be included.
"""
assert not (apk_path and minimal_apks_path)
metadata = None
if elf_path:
logging.debug('Constructing metadata')
git_rev = _DetectGitRevision(os.path.dirname(elf_path))
architecture = _ArchFromElf(elf_path, tool_prefix)
build_id = BuildIdFromElf(elf_path, tool_prefix)
timestamp_obj = datetime.datetime.utcfromtimestamp(os.path.getmtime(
elf_path))
timestamp = calendar.timegm(timestamp_obj.timetuple())
relative_tool_prefix = path_util.ToSrcRootRelative(tool_prefix)
relocations_count = _CountRelocationsFromElf(elf_path, tool_prefix)
metadata = {
models.METADATA_GIT_REVISION: git_rev,
models.METADATA_ELF_ARCHITECTURE: architecture,
models.METADATA_ELF_MTIME: timestamp,
models.METADATA_ELF_BUILD_ID: build_id,
models.METADATA_LINKER_NAME: linker_name,
models.METADATA_TOOL_PREFIX: relative_tool_prefix,
models.METADATA_ELF_RELOCATIONS_COUNT: relocations_count
}
if output_directory:
relative_to_out = lambda path: os.path.relpath(path, output_directory)
gn_args = _ParseGnArgs(os.path.join(output_directory, 'args.gn'))
metadata[models.METADATA_MAP_FILENAME] = relative_to_out(map_path)
metadata[models.METADATA_ELF_FILENAME] = relative_to_out(elf_path)
metadata[models.METADATA_GN_ARGS] = gn_args
if apk_path:
metadata[models.METADATA_APK_FILENAME] = relative_to_out(apk_path)
metadata[models.METADATA_APK_SIZE] = os.path.getsize(apk_path)
elif minimal_apks_path:
sizes_by_module = _CollectModuleSizes(minimal_apks_path)
metadata[models.METADATA_APK_FILENAME] = relative_to_out(
minimal_apks_path)
for name, size in sizes_by_module.items():
key = models.METADATA_APK_SIZE
if name != 'base':
key += '-' + name
metadata[key] = size
return metadata
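# Illustrative call (all paths hypothetical); per the docstring above, the
# function returns None when elf_path is not supplied:
#   metadata = CreateMetadata('out/Release/chrome.map.gz',
#                             'out/Release/lib.unstripped/libchrome.so',
#                             'out/Release/apks/ChromePublic.apk', None,
#                             tool_prefix, 'out/Release', 'lld')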
def _ResolveThinArchivePaths(raw_symbols, thin_archives):
"""Converts object_paths for thin archives to external .o paths."""
for symbol in raw_symbols:
object_path = symbol.object_path
if object_path.endswith(')'):
start_idx = object_path.rindex('(')
archive_path = object_path[:start_idx]
if archive_path in thin_archives:
subpath = object_path[start_idx + 1:-1]
symbol.object_path = ar.CreateThinObjectPath(archive_path, subpath)
def _DeduceObjectPathForSwitchTables(raw_symbols, object_paths_by_name):
strip_num_suffix_regexp = re.compile(r'\s+\(\.\d+\)$')
num_switch_tables = 0
num_unassigned = 0
num_deduced = 0
num_arbitrations = 0
for s in raw_symbols:
if s.full_name.startswith('Switch table for '):
num_switch_tables += 1
# Strip 'Switch table for ' prefix.
name = s.full_name[17:]
# Strip, e.g., ' (.123)' suffix.
name = re.sub(strip_num_suffix_regexp, '', name)
object_paths = object_paths_by_name.get(name, None)
if not s.object_path:
if object_paths is None:
num_unassigned += 1
else:
num_deduced += 1
# If ambiguity arises, arbitrate by taking the first.
s.object_path = object_paths[0]
if len(object_paths) > 1:
num_arbitrations += 1
else:
assert object_paths and s.object_path in object_paths
if num_switch_tables > 0:
logging.info(
'Found %d switch tables: Deduced %d object paths with ' +
'%d arbitrations. %d remain unassigned.', num_switch_tables,
num_deduced, num_arbitrations, num_unassigned)
def _NameStringLiterals(raw_symbols, elf_path, tool_prefix):
# Assign ASCII-readable string literals names like "string contents".
STRING_LENGTH_CUTOFF = 30
PRINTABLE_TBL = [False] * 256
for ch in string.printable:
PRINTABLE_TBL[ord(ch)] = True
for sym, name in string_extract.ReadStringLiterals(raw_symbols, elf_path,
tool_prefix):
# Newlines and tabs are used as delimiters in file_format.py
# At this point, names still have a terminating null byte.
name = name.replace(b'\n', b'').replace(b'\t', b'').strip(b'\00')
is_printable = all(PRINTABLE_TBL[c] for c in name)
if is_printable:
name = name.decode('ascii')
if len(name) > STRING_LENGTH_CUTOFF:
sym.full_name = '"{}[...]"'.format(name[:STRING_LENGTH_CUTOFF])
else:
sym.full_name = '"{}"'.format(name)
else:
sym.full_name = models.STRING_LITERAL_NAME
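# Editor's note: standalone, illustrative sketch (not called anywhere) of the
# cleanup + truncation rule applied by _NameStringLiterals above. The sample
# bytes and the helper name are invented; only stdlib is used.
def _ExampleStringLiteralNaming():
  printable = [False] * 256
  for ch in string.printable:
    printable[ord(ch)] = True
  raw = b'Hello, world! This is a long string literal used for the demo.\x00'
  cleaned = raw.replace(b'\n', b'').replace(b'\t', b'').strip(b'\00')
  if not all(printable[c] for c in cleaned):
    return models.STRING_LITERAL_NAME
  text = cleaned.decode('ascii')
  if len(text) > 30:
    return '"{}[...]"'.format(text[:30])
  return '"{}"'.format(text)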
def _ParseElfInfo(map_path, elf_path, tool_prefix, track_string_literals,
outdir_context=None, linker_name=None):
"""Adds ELF section ranges and symbols."""
if elf_path:
# Run nm on the elf file to retrieve the list of symbol names per-address.
# This list is required because the .map file contains only a single name
# for each address, yet multiple symbols are often coalesced when they are
# identical. This coalescing happens mainly for small symbols and for C++
# templates. Such symbols make up ~500kb of libchrome.so on Android.
elf_nm_result = nm.CollectAliasesByAddressAsync(elf_path, tool_prefix)
# Run nm on all .o/.a files to retrieve the symbol names within them.
  # The list is used to detect when multiple .o files contain the same symbol
# (e.g. inline functions), and to update the object_path / source_path
# fields accordingly.
  # Looking in object files is required because the .map file chooses a
# single path for these symbols.
# Rather than record all paths for each symbol, set the paths to be the
# common ancestor of all paths.
if outdir_context:
bulk_analyzer = obj_analyzer.BulkObjectFileAnalyzer(
tool_prefix, outdir_context.output_directory,
track_string_literals=track_string_literals)
bulk_analyzer.AnalyzePaths(outdir_context.elf_object_paths)
logging.info('Parsing Linker Map')
with _OpenMaybeGzAsText(map_path) as map_file:
map_section_ranges, raw_symbols, linker_map_extras = (
linker_map_parser.MapFileParser().Parse(linker_name, map_file))
if outdir_context and outdir_context.thin_archives:
_ResolveThinArchivePaths(raw_symbols, outdir_context.thin_archives)
if elf_path:
logging.debug('Validating section sizes')
elf_section_ranges = _SectionInfoFromElf(elf_path, tool_prefix)
differing_elf_section_sizes = {}
differing_map_section_sizes = {}
for k, (_, elf_size) in elf_section_ranges.items():
if k in _SECTION_SIZE_BLACKLIST:
continue
      # Use a default so a section missing from the .map is reported as a size
      # mismatch instead of crashing while unpacking None.
      (_, map_size) = map_section_ranges.get(k, (None, None))
if map_size != elf_size:
differing_map_section_sizes[k] = map_size
differing_elf_section_sizes[k] = elf_size
if differing_map_section_sizes:
logging.error('ELF file and .map file do not agree on section sizes.')
logging.error('readelf: %r', differing_elf_section_sizes)
logging.error('.map file: %r', differing_map_section_sizes)
sys.exit(1)
if elf_path and outdir_context:
missed_object_paths = _DiscoverMissedObjectPaths(
raw_symbols, outdir_context.known_inputs)
missed_object_paths = ar.ExpandThinArchives(
missed_object_paths, outdir_context.output_directory)[0]
bulk_analyzer.AnalyzePaths(missed_object_paths)
bulk_analyzer.SortPaths()
if track_string_literals:
merge_string_syms = [s for s in raw_symbols if
s.full_name == '** merge strings' or
s.full_name == '** lld merge strings']
# More likely for there to be a bug in supersize than an ELF to not have a
# single string literal.
assert merge_string_syms
string_ranges = [(s.address, s.size) for s in merge_string_syms]
bulk_analyzer.AnalyzeStringLiterals(elf_path, string_ranges)
# Map file for some reason doesn't demangle all names.
# Demangle prints its own log statement.
demangle.DemangleRemainingSymbols(raw_symbols, tool_prefix)
object_paths_by_name = {}
if elf_path:
logging.info(
'Adding symbols removed by identical code folding (as reported by nm)')
# This normally does not block (it's finished by this time).
names_by_address = elf_nm_result.get()
_UpdateSymbolNamesFromNm(raw_symbols, names_by_address)
raw_symbols = _AddNmAliases(raw_symbols, names_by_address)
if outdir_context:
object_paths_by_name = bulk_analyzer.GetSymbolNames()
logging.debug(
'Fetched path information for %d symbols from %d files',
len(object_paths_by_name),
len(outdir_context.elf_object_paths) + len(missed_object_paths))
_DeduceObjectPathForSwitchTables(raw_symbols, object_paths_by_name)
# For aliases, this provides path information where there wasn't any.
logging.info('Creating aliases for symbols shared by multiple paths')
raw_symbols = _AssignNmAliasPathsAndCreatePathAliases(
raw_symbols, object_paths_by_name)
if track_string_literals:
logging.info('Waiting for string literal extraction to complete.')
list_of_positions_by_object_path = bulk_analyzer.GetStringPositions()
bulk_analyzer.Close()
if track_string_literals:
logging.info('Deconstructing ** merge strings into literals')
replacements = _CreateMergeStringsReplacements(merge_string_syms,
list_of_positions_by_object_path)
for merge_sym, literal_syms in zip(merge_string_syms, replacements):
# Don't replace if no literals were found.
if literal_syms:
# Re-find the symbols since aliases cause their indices to change.
idx = raw_symbols.index(merge_sym)
# This assignment is a bit slow (causes array to be shifted), but
# is fast enough since len(merge_string_syms) < 10.
raw_symbols[idx:idx + 1] = literal_syms
linker_map_parser.DeduceObjectPathsFromThinMap(raw_symbols, linker_map_extras)
if elf_path:
_NameStringLiterals(raw_symbols, elf_path, tool_prefix)
# If we have an ELF file, use its ranges as the source of truth, since some
# sections can differ from the .map.
return (elf_section_ranges if elf_path else map_section_ranges, raw_symbols,
object_paths_by_name)
def _ComputePakFileSymbols(
file_name, contents, res_info, symbols_by_id, compression_ratio=1):
id_map = {
id(v): k
for k, v in sorted(list(contents.resources.items()), reverse=True)
}
alias_map = {
k: id_map[id(v)]
for k, v in contents.resources.items() if id_map[id(v)] != k
}
# Longest locale pak is: es-419.pak.
# Only non-translated .pak files are: resources.pak, chrome_100_percent.pak.
if len(posixpath.basename(file_name)) <= 10:
section_name = models.SECTION_PAK_TRANSLATIONS
else:
section_name = models.SECTION_PAK_NONTRANSLATED
overhead = (12 + 6) * compression_ratio # Header size plus extra offset
symbols_by_id[hash(file_name)] = models.Symbol(
section_name, overhead, full_name='Overhead: {}'.format(file_name))
for resource_id in sorted(contents.resources):
if resource_id in alias_map:
# 4 extra bytes of metadata (2 16-bit ints)
size = 4
resource_id = alias_map[resource_id]
else:
resource_data = contents.resources[resource_id]
# 6 extra bytes of metadata (1 32-bit int, 1 16-bit int)
size = len(resource_data) + 6
name, source_path = res_info[resource_id]
if resource_id not in symbols_by_id:
full_name = '{}: {}'.format(source_path, name)
new_symbol = models.Symbol(
section_name, 0, address=resource_id, full_name=full_name)
if (section_name == models.SECTION_PAK_NONTRANSLATED and
_IsPakContentUncompressed(resource_data)):
new_symbol.flags |= models.FLAG_UNCOMPRESSED
symbols_by_id[resource_id] = new_symbol
size *= compression_ratio
symbols_by_id[resource_id].size += size
return section_name
def _IsPakContentUncompressed(content):
raw_size = len(content)
# Assume anything less than 100 bytes cannot be compressed.
if raw_size < 100:
return False
compressed_size = len(zlib.compress(content, 1))
compression_ratio = compressed_size / float(raw_size)
return compression_ratio < _UNCOMPRESSED_COMPRESSION_RATIO_THRESHOLD
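# Editor's note: illustrative helper (not called anywhere) showing how
# _IsPakContentUncompressed behaves. Repetitive data compresses well and is
# reported as uncompressed; random data is not. The exact outcome depends on
# _UNCOMPRESSED_COMPRESSION_RATIO_THRESHOLD (defined earlier in this module),
# so treat the expected (True, False) result as an assumption.
def _ExampleIsPakContentUncompressed():
  compressible = b'A' * 4096  # Repetitive payload -> small zlib output.
  incompressible = os.urandom(4096)  # Random payload -> zlib cannot shrink it.
  return (_IsPakContentUncompressed(compressible),
          _IsPakContentUncompressed(incompressible))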
class _ResourceSourceMapper(object):
def __init__(self, size_info_prefix, knobs):
self._knobs = knobs
self._res_info = self._LoadResInfo(size_info_prefix)
self._pattern_dollar_underscore = re.compile(r'\$+(.*?)(?:__\d)+')
self._pattern_version_suffix = re.compile(r'-v\d+/')
@staticmethod
def _ParseResInfoFile(res_info_path):
with open(res_info_path, 'r') as info_file:
return dict(l.rstrip().split('\t') for l in info_file)
def _LoadResInfo(self, size_info_prefix):
apk_res_info_path = size_info_prefix + '.res.info'
res_info_without_root = self._ParseResInfoFile(apk_res_info_path)
# We package resources in the res/ folder only in the apk.
res_info = {
os.path.join('res', dest): source
for dest, source in res_info_without_root.items()
}
res_info.update(self._knobs.apk_other_files)
return res_info
def FindSourceForPath(self, path):
original_path = path
# Sometimes android adds $ in front and __# before extension.
path = self._pattern_dollar_underscore.sub(r'\1', path)
ret = self._res_info.get(path)
if ret:
return ret
# Android build tools may append extra -v flags for the root dir.
path = self._pattern_version_suffix.sub('/', path)
ret = self._res_info.get(path)
if ret:
return ret
if original_path not in self._knobs.apk_expected_other_files:
logging.warning('Unexpected file in apk: %s', original_path)
return None
def _ParsePakInfoFile(pak_info_path):
with open(pak_info_path, 'r') as info_file:
res_info = {}
for line in info_file.readlines():
name, res_id, path = line.split(',')
res_info[int(res_id)] = (name, path.strip())
return res_info
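# Editor's note: small usage sketch (not called anywhere) for _ParsePakInfoFile.
# The two resource entries are fabricated for illustration.
def _ExampleParsePakInfoFile():
  import tempfile
  with tempfile.NamedTemporaryFile(
      'w', suffix='.pak.info', delete=False) as info_file:
    info_file.write('IDR_FOO,100,ui/resources/foo.grd\n')
    info_file.write('IDR_BAR,101,ui/resources/bar.grd\n')
    path = info_file.name
  try:
    # Returns {100: ('IDR_FOO', 'ui/resources/foo.grd'), 101: (...)}.
    return _ParsePakInfoFile(path)
  finally:
    os.unlink(path)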
def _ParsePakSymbols(symbols_by_id, object_paths_by_pak_id):
raw_symbols = []
for resource_id, symbol in symbols_by_id.items():
raw_symbols.append(symbol)
paths = object_paths_by_pak_id.get(resource_id)
if not paths:
continue
symbol.object_path = paths.pop()
if not paths:
continue
aliases = symbol.aliases or [symbol]
symbol.aliases = aliases
for path in paths:
new_sym = models.Symbol(
symbol.section_name, symbol.size, address=symbol.address,
full_name=symbol.full_name, object_path=path, aliases=aliases)
aliases.append(new_sym)
raw_symbols.append(new_sym)
raw_symbols.sort(key=lambda s: (s.section_name, s.address, s.object_path))
raw_total = 0.0
int_total = 0
for symbol in raw_symbols:
raw_total += symbol.size
# We truncate rather than round to ensure that we do not over attribute. It
# is easier to add another symbol to make up the difference.
symbol.size = int(symbol.size)
int_total += symbol.size
# Attribute excess to translations since only those are compressed.
raw_symbols.append(models.Symbol(
models.SECTION_PAK_TRANSLATIONS, int(round(raw_total - int_total)),
full_name='Overhead: Pak compression artifacts'))
return raw_symbols
def _ParseApkElfSectionRanges(section_ranges, metadata, apk_elf_result):
if metadata:
logging.debug('Extracting section sizes from .so within .apk')
apk_build_id, apk_section_ranges, elf_overhead_size = apk_elf_result.get()
assert apk_build_id == metadata[models.METADATA_ELF_BUILD_ID], (
'BuildID from apk_elf_result did not match')
packed_section_name = None
architecture = metadata[models.METADATA_ELF_ARCHITECTURE]
    # Relocation packing is enabled only for arm32 & arm64.
if architecture == 'arm':
packed_section_name = '.rel.dyn'
elif architecture == 'arm64':
packed_section_name = '.rela.dyn'
if packed_section_name:
unpacked_range = section_ranges.get(packed_section_name)
if unpacked_range is None:
logging.warning('Packed section not present: %s', packed_section_name)
elif unpacked_range != apk_section_ranges.get(packed_section_name):
# These ranges are different only when using relocation_packer, which
# hasn't been used since switching from gold -> lld.
apk_section_ranges['%s (unpacked)' %
packed_section_name] = unpacked_range
return apk_section_ranges, elf_overhead_size
return section_ranges, 0
class _ResourcePathDeobfuscator(object):
def __init__(self, pathmap_path):
self._pathmap = self._LoadResourcesPathmap(pathmap_path)
def _LoadResourcesPathmap(self, pathmap_path):
"""Load the pathmap of obfuscated resource paths.
Returns: A dict mapping from obfuscated paths to original paths or an
empty dict if passed a None |pathmap_path|.
"""
if pathmap_path is None:
return {}
pathmap = {}
with open(pathmap_path, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('--') or line == '':
continue
original, renamed = line.split(' -> ')
pathmap[renamed] = original
return pathmap
def MaybeRemapPath(self, path):
long_path = self._pathmap.get(path)
if long_path:
return long_path
# if processing a .minimal.apks, we are actually just processing the base
# module.
long_path = self._pathmap.get('base/{}'.format(path))
if long_path:
# The first 5 chars are 'base/', which we don't need because we are
# looking directly inside the base module apk.
return long_path[5:]
return path
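# Editor's note: illustrative usage sketch (not called anywhere) for
# _ResourcePathDeobfuscator. The pathmap contents are fabricated; real files
# come from the Android resource path-shortening step.
def _ExampleResourcePathDeobfuscation():
  import tempfile
  with tempfile.NamedTemporaryFile(
      'w', suffix='.pathmap.txt', delete=False) as f:
    f.write('-- Path mapping --\n')
    f.write('res/layout/main.xml -> res/a.xml\n')
    path = f.name
  try:
    deobfuscator = _ResourcePathDeobfuscator(path)
    # The shortened path maps back; unknown paths pass through unchanged.
    return (deobfuscator.MaybeRemapPath('res/a.xml'),
            deobfuscator.MaybeRemapPath('res/raw/other.bin'))
  finally:
    os.unlink(path)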
def _ParseApkOtherSymbols(section_ranges, apk_path, apk_so_path,
resources_pathmap_path, size_info_prefix, knobs):
res_source_mapper = _ResourceSourceMapper(size_info_prefix, knobs)
resource_deobfuscator = _ResourcePathDeobfuscator(resources_pathmap_path)
apk_symbols = []
dex_size = 0
zip_info_total = 0
with zipfile.ZipFile(apk_path) as z:
for zip_info in z.infolist():
zip_info_total += zip_info.compress_size
# Skip main shared library, pak, and dex files as they are accounted for.
if (zip_info.filename == apk_so_path
or zip_info.filename.endswith('.pak')):
continue
if zip_info.filename.endswith('.dex'):
dex_size += zip_info.file_size
continue
resource_filename = resource_deobfuscator.MaybeRemapPath(
zip_info.filename)
source_path = res_source_mapper.FindSourceForPath(resource_filename)
if source_path is None:
source_path = os.path.join(models.APK_PREFIX_PATH, resource_filename)
apk_symbols.append(
models.Symbol(
models.SECTION_OTHER,
zip_info.compress_size,
source_path=source_path,
full_name=resource_filename)) # Full name must disambiguate
overhead_size = os.path.getsize(apk_path) - zip_info_total
assert overhead_size >= 0, 'Apk overhead must be non-negative'
zip_overhead_symbol = models.Symbol(
models.SECTION_OTHER, overhead_size, full_name='Overhead: APK file')
apk_symbols.append(zip_overhead_symbol)
_ExtendSectionRange(section_ranges, models.SECTION_OTHER,
sum(s.size for s in apk_symbols))
return dex_size, apk_symbols
def _CreatePakObjectMap(object_paths_by_name):
# IDS_ macro usages result in templated function calls that contain the
# resource ID in them. These names are collected along with all other symbols
# by running "nm" on them. We just need to extract the values from them.
object_paths_by_pak_id = {}
PREFIX = 'void ui::WhitelistedResource<'
id_start_idx = len(PREFIX)
id_end_idx = -len('>()')
for name in object_paths_by_name:
if name.startswith(PREFIX):
pak_id = int(name[id_start_idx:id_end_idx])
object_paths_by_pak_id[pak_id] = object_paths_by_name[name]
return object_paths_by_pak_id
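# Editor's note: tiny illustrative sketch (not called anywhere) of the symbol
# name format _CreatePakObjectMap expects. The object paths are invented.
def _ExampleCreatePakObjectMap():
  object_paths_by_name = {
      'void ui::WhitelistedResource<12345>()': ['obj/ui/foo.o'],
      'SomeUnrelatedSymbol()': ['obj/other/bar.o'],
  }
  # Only the templated WhitelistedResource<> entry is kept, keyed by 12345.
  return _CreatePakObjectMap(object_paths_by_name)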
def _FindPakSymbolsFromApk(section_ranges, apk_path, size_info_prefix, knobs):
with zipfile.ZipFile(apk_path) as z:
pak_zip_infos = (f for f in z.infolist() if f.filename.endswith('.pak'))
pak_info_path = size_info_prefix + '.pak.info'
res_info = _ParsePakInfoFile(pak_info_path)
symbols_by_id = {}
total_compressed_size = 0
total_uncompressed_size = 0
for zip_info in pak_zip_infos:
contents = data_pack.ReadDataPackFromString(z.read(zip_info))
compression_ratio = 1.0
if zip_info.compress_size < zip_info.file_size:
total_compressed_size += zip_info.compress_size
total_uncompressed_size += zip_info.file_size
compression_ratio = knobs.pak_compression_ratio
section_name = _ComputePakFileSymbols(
zip_info.filename, contents,
res_info, symbols_by_id, compression_ratio=compression_ratio)
_ExtendSectionRange(section_ranges, section_name, zip_info.compress_size)
if total_uncompressed_size > 0:
actual_ratio = (
float(total_compressed_size) / total_uncompressed_size)
logging.info('Pak Compression Ratio: %f Actual: %f Diff: %.0f',
knobs.pak_compression_ratio, actual_ratio,
(knobs.pak_compression_ratio - actual_ratio) *
total_uncompressed_size)
return symbols_by_id
def _FindPakSymbolsFromFiles(section_ranges, pak_files, pak_info_path,
output_directory):
"""Uses files from args to find and add pak symbols."""
res_info = _ParsePakInfoFile(pak_info_path)
symbols_by_id = {}
for pak_file_path in pak_files:
with open(pak_file_path, 'rb') as f:
contents = data_pack.ReadDataPackFromString(f.read())
section_name = _ComputePakFileSymbols(
os.path.relpath(pak_file_path, output_directory), contents, res_info,
symbols_by_id)
_ExtendSectionRange(section_ranges, section_name,
os.path.getsize(pak_file_path))
return symbols_by_id
def _CalculateElfOverhead(section_ranges, elf_path):
if elf_path:
section_sizes_total_without_bss = sum(
size for k, (address, size) in section_ranges.items()
if k not in models.BSS_SECTIONS)
elf_overhead_size = (
os.path.getsize(elf_path) - section_sizes_total_without_bss)
assert elf_overhead_size >= 0, (
'Negative ELF overhead {}'.format(elf_overhead_size))
return elf_overhead_size
return 0
def _OverwriteSymbolSizesWithRelocationCount(raw_symbols, tool_prefix,
elf_path):
logging.info('Overwriting symbol sizes with relocation count')
native_symbols = [sym for sym in raw_symbols if sym.IsNative()]
symbol_addresses = [0] * (1 + len(native_symbols))
for i, symbol in enumerate(native_symbols):
symbol_addresses[i] = symbol.address
# Last symbol address is the end of the last symbol, so we don't misattribute
# all relros after the last symbol to that symbol.
symbol_addresses[-1] = native_symbols[-1].address + native_symbols[-1].size
for symbol in raw_symbols:
symbol.address = 0
symbol.size = 0
symbol.padding = 0
relocs_cmd = [path_util.GetReadElfPath(tool_prefix), '--relocs', elf_path]
relro_addresses = subprocess.check_output(relocs_cmd).decode('ascii').split(
'\n')
# Grab first column from (sample output) '02de6d5c 00000017 R_ARM_RELATIVE'
relro_addresses = [
int(l.split()[0], 16) for l in relro_addresses if 'R_ARM_RELATIVE' in l
]
  # It is more likely that there is a bug in supersize than that an ELF has no
  # relative relocations at all.
assert relro_addresses
logging.info('Adding %d relocations', len(relro_addresses))
for addr in relro_addresses:
    # Attribute each relro to the largest symbol start address that precedes it.
idx = bisect.bisect_right(symbol_addresses, addr) - 1
if 0 <= idx < len(native_symbols):
symbol = native_symbols[idx]
for alias in symbol.aliases or [symbol]:
alias.size += 1
logging.info('Removing non-native symbols...')
raw_symbols[:] = [sym for sym in raw_symbols if sym.size or sym.IsNative()]
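# Editor's note: standalone sketch (not called anywhere) of the bisect-based
# attribution used above: each relocation address is credited to the symbol
# whose start address most closely precedes it. All addresses are invented.
def _ExampleAttributeAddressesToSymbols():
  symbol_starts = [0x1000, 0x1040, 0x1100]  # Sorted symbol start addresses.
  end_of_last_symbol = 0x1180
  boundaries = symbol_starts + [end_of_last_symbol]
  counts = [0] * len(symbol_starts)
  for addr in (0x1004, 0x1044, 0x1048, 0x1150, 0x2000):
    idx = bisect.bisect_right(boundaries, addr) - 1
    if 0 <= idx < len(symbol_starts):
      counts[idx] += 1
  return counts  # [1, 2, 1]; 0x2000 lies past the last symbol and is dropped.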
def _AddUnattributedSectionSymbols(raw_symbols, section_ranges, elf_result):
# Create symbols for ELF sections not covered by existing symbols.
logging.info('Searching for symbol gaps...')
_, section_ranges, _ = elf_result.get()
last_symbol_ends = collections.defaultdict(int)
for sym in raw_symbols:
if sym.end_address > last_symbol_ends[sym.section_name]:
last_symbol_ends[sym.section_name] = sym.end_address
for section_name, last_symbol_end in last_symbol_ends.items():
size_from_syms = last_symbol_end - section_ranges[section_name][0]
overhead = section_ranges[section_name][1] - size_from_syms
assert overhead >= 0, (
('End of last symbol (%x) in section %s is %d bytes after the end of '
'section from readelf (%x).') % (last_symbol_end, section_name,
-overhead,
sum(section_ranges[section_name])))
if overhead > 0 and section_name not in models.BSS_SECTIONS:
raw_symbols.append(
models.Symbol(
section_name,
overhead,
address=last_symbol_end,
full_name='** {} (unattributed)'.format(section_name)))
logging.info('Last symbol in %s does not reach end of section, gap=%d',
section_name, overhead)
# Sort keys to ensure consistent order (> 1 sections may have address = 0).
for section_name in sorted(section_ranges.keys()):
# Handle sections that don't appear in |raw_symbols|.
if section_name not in last_symbol_ends:
address, section_size = section_ranges[section_name]
      logging.info('All bytes in %s are unattributed, gap=%d', section_name,
                   section_size)
raw_symbols.append(
models.Symbol(
models.SECTION_OTHER,
section_size,
full_name='** ELF Section: {}'.format(section_name),
address=address))
_ExtendSectionRange(section_ranges, models.SECTION_OTHER, section_size)
def CreateSectionSizesAndSymbols(map_path=None,
tool_prefix=None,
output_directory=None,
elf_path=None,
apk_path=None,
mapping_path=None,
resources_pathmap_path=None,
track_string_literals=True,
metadata=None,
apk_so_path=None,
pak_files=None,
pak_info_file=None,
linker_name=None,
size_info_prefix=None,
knobs=None):
"""Creates sections sizes and symbols for a SizeInfo.
Args:
map_path: Path to the linker .map(.gz) file to parse.
tool_prefix: Prefix for c++filt & nm (required).
output_directory: Build output directory. If None, source_paths and symbol
alias information will not be recorded.
elf_path: Path to the corresponding unstripped ELF file. Used to find symbol
aliases and inlined functions. Can be None.
apk_path: Path to the .apk file to measure.
mapping_path: Path to the .mapping file for DEX symbol processing.
resources_pathmap_path: Path to the pathmap file that maps original
resource paths to shortened resource paths.
track_string_literals: Whether to break down "** merge string" sections into
smaller symbols (requires output_directory).
metadata: Metadata dict from CreateMetadata().
apk_so_path: Path to an .so file within an APK file.
pak_files: List of paths to .pak files.
pak_info_file: Path to a .pak.info file.
linker_name: A coded linker name (see linker_map_parser.py).
size_info_prefix: Path to $out/size-info/$ApkName.
knobs: Instance of SectionSizeKnobs with tunable knobs and options.
Returns:
A tuple of (section_sizes, raw_symbols).
    section_sizes is a dict mapping section names to their size in bytes.
raw_symbols is a list of Symbol objects.
"""
knobs = knobs or SectionSizeKnobs()
if apk_path and elf_path:
# Extraction takes around 1 second, so do it in parallel.
apk_elf_result = parallel.ForkAndCall(_ElfInfoFromApk,
(apk_path, apk_so_path, tool_prefix))
outdir_context = None
source_mapper = None
if output_directory:
# Start by finding the elf_object_paths, so that nm can run on them while
# the linker .map is being parsed.
logging.info('Parsing ninja files.')
source_mapper, ninja_elf_object_paths = (
ninja_parser.Parse(output_directory, elf_path))
# If no symbols came from the library, it's because it's a partition
# extracted from a combined library. Look there instead.
if not ninja_elf_object_paths and elf_path:
combined_elf_path = elf_path.replace('.so', '__combined.so')
logging.info('Found no objects in %s, trying %s', elf_path,
combined_elf_path)
source_mapper, ninja_elf_object_paths = (ninja_parser.Parse(
output_directory, combined_elf_path))
if ninja_elf_object_paths:
assert map_path and '__combined.so.map' in map_path
logging.debug('Parsed %d .ninja files.', source_mapper.parsed_file_count)
assert not elf_path or ninja_elf_object_paths, (
'Failed to find link command in ninja files for ' +
os.path.relpath(elf_path, output_directory))
if ninja_elf_object_paths:
elf_object_paths, thin_archives = ar.ExpandThinArchives(
ninja_elf_object_paths, output_directory)
known_inputs = set(elf_object_paths)
known_inputs.update(ninja_elf_object_paths)
else:
elf_object_paths = None
known_inputs = None
# When we don't know which elf file is used, just search all paths.
if knobs.analyze_native:
thin_archives = set(
p for p in source_mapper.IterAllPaths() if p.endswith('.a')
and ar.IsThinArchive(os.path.join(output_directory, p)))
else:
thin_archives = None
outdir_context = _OutputDirectoryContext(
elf_object_paths=elf_object_paths,
known_inputs=known_inputs,
output_directory=output_directory,
source_mapper=source_mapper,
thin_archives=thin_archives)
if knobs.analyze_native:
section_ranges, raw_symbols, object_paths_by_name = _ParseElfInfo(
map_path,
elf_path,
tool_prefix,
track_string_literals,
outdir_context=outdir_context,
linker_name=linker_name)
else:
section_ranges, raw_symbols, object_paths_by_name = {}, [], None
elf_overhead_size = _CalculateElfOverhead(section_ranges, elf_path)
pak_symbols_by_id = None
if apk_path and size_info_prefix:
if elf_path:
section_ranges, elf_overhead_size = _ParseApkElfSectionRanges(
section_ranges, metadata, apk_elf_result)
_AddUnattributedSectionSymbols(raw_symbols, section_ranges,
apk_elf_result)
# Can modify |section_ranges|.
pak_symbols_by_id = _FindPakSymbolsFromApk(section_ranges, apk_path,
size_info_prefix, knobs)
# Can modify |section_ranges|.
dex_size, other_symbols = _ParseApkOtherSymbols(
section_ranges, apk_path, apk_so_path, resources_pathmap_path,
size_info_prefix, knobs)
if knobs.analyze_java:
dex_symbols = apkanalyzer.CreateDexSymbols(apk_path, mapping_path,
size_info_prefix)
raw_symbols.extend(dex_symbols)
# We can't meaningfully track section size of dex methods vs other, so
# just fake the size of dex methods as the sum of symbols, and make
# "dex other" responsible for any unattributed bytes.
dex_method_size = int(
round(
sum(s.pss for s in dex_symbols
if s.section_name == models.SECTION_DEX_METHOD)))
section_ranges[models.SECTION_DEX_METHOD] = (0, dex_method_size)
section_ranges[models.SECTION_DEX] = (0, dex_size - dex_method_size)
dex_other_size = int(
round(
sum(s.pss for s in dex_symbols
if s.section_name == models.SECTION_DEX)))
unattributed_dex = section_ranges[models.SECTION_DEX][1] - dex_other_size
# Compare against -5 instead of 0 to guard against round-off errors.
assert unattributed_dex >= -5, ('Dex symbols take up more space than '
'the dex sections have available')
if unattributed_dex > 0:
other_symbols.append(
models.Symbol(
models.SECTION_DEX,
unattributed_dex,
full_name='** .dex (unattributed)'))
raw_symbols.extend(other_symbols)
elif pak_files and pak_info_file:
# Can modify |section_ranges|.
pak_symbols_by_id = _FindPakSymbolsFromFiles(
section_ranges, pak_files, pak_info_file, output_directory)
if elf_path:
elf_overhead_symbol = models.Symbol(
models.SECTION_OTHER, elf_overhead_size, full_name='Overhead: ELF file')
_ExtendSectionRange(section_ranges, models.SECTION_OTHER, elf_overhead_size)
raw_symbols.append(elf_overhead_symbol)
if pak_symbols_by_id:
logging.debug('Extracting pak IDs from symbol names, and creating symbols')
object_paths_by_pak_id = {}
if knobs.analyze_native:
object_paths_by_pak_id = _CreatePakObjectMap(object_paths_by_name)
pak_raw_symbols = _ParsePakSymbols(
pak_symbols_by_id, object_paths_by_pak_id)
raw_symbols.extend(pak_raw_symbols)
_ExtractSourcePathsAndNormalizeObjectPaths(raw_symbols, source_mapper)
_PopulateComponents(raw_symbols, knobs)
logging.info('Converting excessive aliases into shared-path symbols')
_CompactLargeAliasesIntoSharedSymbols(raw_symbols, knobs)
logging.debug('Connecting nm aliases')
_ConnectNmAliases(raw_symbols)
if elf_path and knobs.relocations_mode:
_OverwriteSymbolSizesWithRelocationCount(raw_symbols, tool_prefix, elf_path)
section_sizes = {k: size for k, (address, size) in section_ranges.items()}
return section_sizes, raw_symbols
def CreateSizeInfo(section_sizes_list,
raw_symbols_list,
metadata_list,
normalize_names=True):
"""Performs operations on all symbols and creates a SizeInfo object."""
for raw_symbols in raw_symbols_list:
file_format.SortSymbols(raw_symbols)
file_format.CalculatePadding(raw_symbols)
# Do not call _NormalizeNames() during archive since that method tends to
# need tweaks over time. Calling it only when loading .size files allows for
# more flexibility.
if normalize_names:
_NormalizeNames(raw_symbols)
# TODO(huangs): Implement data fusing to compute the following for real.
assert len(section_sizes_list) == 1
section_sizes = section_sizes_list[0]
raw_symbols = raw_symbols_list[0]
metadata = metadata_list[0]
return models.SizeInfo(section_sizes, raw_symbols, metadata=metadata)
def _DetectGitRevision(directory):
"""Runs git rev-parse to get the SHA1 hash of the current revision.
Args:
directory: Path to directory where rev-parse command will be run.
Returns:
    A string with the SHA1 hash, or None if an error occurred.
"""
try:
git_rev = subprocess.check_output(
['git', '-C', directory, 'rev-parse', 'HEAD']).decode('ascii')
return git_rev.rstrip()
except Exception:
logging.warning('Failed to detect git revision for file metadata.')
return None
def BuildIdFromElf(elf_path, tool_prefix):
args = [path_util.GetReadElfPath(tool_prefix), '-n', elf_path]
stdout = subprocess.check_output(args).decode('ascii')
match = re.search(r'Build ID: (\w+)', stdout)
assert match, 'Build ID not found from running: ' + ' '.join(args)
return match.group(1)
def _SectionInfoFromElf(elf_path, tool_prefix):
args = [path_util.GetReadElfPath(tool_prefix), '-S', '--wide', elf_path]
stdout = subprocess.check_output(args).decode('ascii')
section_ranges = {}
# Matches [ 2] .hash HASH 00000000006681f0 0001f0 003154 04 A 3 0 8
for match in re.finditer(r'\[[\s\d]+\] (\..*)$', stdout, re.MULTILINE):
items = match.group(1).split()
section_ranges[items[0]] = (int(items[2], 16), int(items[4], 16))
return section_ranges
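# Editor's note: self-contained sketch (not called anywhere) showing how the
# regex in _SectionInfoFromElf picks apart one readelf --wide line. The sample
# line mirrors the comment above; the values are illustrative.
def _ExampleParseReadelfSectionLine():
  line = ('  [ 2] .hash             HASH            00000000006681f0 '
          '0001f0 003154 04   A  3   0  8')
  match = re.search(r'\[[\s\d]+\] (\..*)$', line)
  items = match.group(1).split()
  # items[0] = section name, items[2] = address (hex), items[4] = size (hex).
  return items[0], int(items[2], 16), int(items[4], 16)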
def _ElfIsMainPartition(elf_path, tool_prefix):
section_ranges = _SectionInfoFromElf(elf_path, tool_prefix)
return models.SECTION_PART_END in section_ranges.keys()
def _ArchFromElf(elf_path, tool_prefix):
args = [path_util.GetReadElfPath(tool_prefix), '-h', elf_path]
stdout = subprocess.check_output(args).decode('ascii')
  machine = re.search(r'Machine:\s*(.+)', stdout).group(1)
if machine == 'Intel 80386':
return 'x86'
if machine == 'Advanced Micro Devices X86-64':
return 'x64'
elif machine == 'ARM':
return 'arm'
elif machine == 'AArch64':
return 'arm64'
return machine
def _CountRelocationsFromElf(elf_path, tool_prefix):
args = [path_util.GetObjDumpPath(tool_prefix), '--private-headers', elf_path]
stdout = subprocess.check_output(args).decode('ascii')
  relocations = re.search(r'REL[AR]?COUNT\s*(.+)', stdout).group(1)
return int(relocations, 16)
def _ParseGnArgs(args_path):
"""Returns a list of normalized "key=value" strings."""
args = {}
with open(args_path) as f:
for l in f:
# Strips #s even if within string literal. Not a problem in practice.
parts = l.split('#')[0].split('=')
if len(parts) != 2:
continue
args[parts[0].strip()] = parts[1].strip()
return ["%s=%s" % x for x in sorted(args.items())]
def _DetectLinkerName(map_path):
with _OpenMaybeGzAsText(map_path) as map_file:
return linker_map_parser.DetectLinkerNameFromMapFile(map_file)
def _ElfInfoFromApk(apk_path, apk_so_path, tool_prefix):
"""Returns a tuple of (build_id, section_ranges, elf_overhead_size)."""
with zip_util.UnzipToTemp(apk_path, apk_so_path) as temp:
build_id = BuildIdFromElf(temp, tool_prefix)
section_ranges = _SectionInfoFromElf(temp, tool_prefix)
elf_overhead_size = _CalculateElfOverhead(section_ranges, temp)
return build_id, section_ranges, elf_overhead_size
def _AutoIdentifyInputFile(args):
if args.f.endswith('.minimal.apks'):
args.minimal_apks_file = args.f
logging.info('Auto-identified --minimal-apks-file.')
elif args.f.endswith('.apk'):
args.apk_file = args.f
logging.info('Auto-identified --apk-file.')
elif args.f.endswith('.so') or '.' not in os.path.basename(args.f):
logging.info('Auto-identified --elf-file.')
args.elf_file = args.f
elif args.f.endswith('.map') or args.f.endswith('.map.gz'):
logging.info('Auto-identified --map-file.')
args.map_file = args.f
else:
return False
return True
def AddMainPathsArguments(parser):
"""Add arguments for _DeduceMainPaths()."""
parser.add_argument('-f', metavar='FILE',
help='Auto-identify input file type.')
parser.add_argument('--apk-file',
help='.apk file to measure. Other flags can generally be '
'derived when this is used.')
parser.add_argument(
'--resources-pathmap-file',
      help='.pathmap.txt file that contains a mapping from '
'original resource paths to shortened resource paths.')
parser.add_argument('--minimal-apks-file',
help='.minimal.apks file to measure. Other flags can '
'generally be derived when this is used.')
parser.add_argument('--mapping-file',
help='Proguard .mapping file for deobfuscation.')
parser.add_argument('--elf-file',
help='Path to input ELF file. Currently used for '
'capturing metadata.')
parser.add_argument('--map-file',
help='Path to input .map(.gz) file. Defaults to '
'{{elf_file}}.map(.gz)?. If given without '
'--elf-file, no size metadata will be recorded.')
parser.add_argument('--no-source-paths', action='store_true',
help='Do not use .ninja files to map '
'object_path -> source_path')
parser.add_argument('--output-directory',
help='Path to the root build directory.')
parser.add_argument('--tool-prefix',
help='Path prefix for c++filt, nm, readelf.')
def AddArguments(parser):
parser.add_argument('size_file', help='Path to output .size file.')
parser.add_argument('--pak-file', action='append',
help='Paths to pak files.')
parser.add_argument('--pak-info-file',
help='This file should contain all ids found in the pak '
'files that have been passed in.')
parser.add_argument('--no-string-literals', dest='track_string_literals',
default=True, action='store_false',
help='Disable breaking down "** merge strings" into more '
'granular symbols.')
parser.add_argument(
'--relocations',
action='store_true',
      help='Instead of counting binary size, count number of relative '
'relocation instructions in ELF code.')
parser.add_argument('--source-directory',
help='Custom path to the root source directory.')
parser.add_argument(
'--java-only', action='store_true', help='Run on only Java symbols')
parser.add_argument(
'--native-only', action='store_true', help='Run on only native symbols')
parser.add_argument(
'--no-java', action='store_true', help='Do not run on Java symbols')
parser.add_argument(
'--no-native', action='store_true', help='Do not run on native symbols')
parser.add_argument(
'--include-padding',
action='store_true',
help='Include a padding field for each symbol, instead of rederiving '
'from consecutive symbols on file load.')
AddMainPathsArguments(parser)
def _DeduceNativeInfo(tentative_output_dir, apk_path, elf_path, map_path,
on_config_error):
apk_so_path = None
if apk_path:
with zipfile.ZipFile(apk_path) as z:
lib_infos = [
f for f in z.infolist()
if f.filename.endswith('.so') and f.file_size > 0
]
assert lib_infos, 'APK has no .so files.'
# TODO(agrieve): Add support for multiple .so files, and take into account
# secondary architectures.
apk_so_path = max(lib_infos, key=lambda x: x.file_size).filename
logging.debug('Sub-apk path=%s', apk_so_path)
if not elf_path and tentative_output_dir:
elf_path = os.path.join(
tentative_output_dir, 'lib.unstripped',
os.path.basename(apk_so_path.replace('crazy.', '')))
logging.debug('Detected --elf-file=%s', elf_path)
if map_path:
if not map_path.endswith('.map') and not map_path.endswith('.map.gz'):
on_config_error('Expected --map-file to end with .map or .map.gz')
elif elf_path:
# Look for a .map file named for either the ELF file, or in the
# partitioned native library case, the combined ELF file from which the
# main library was extracted. Note that we don't yet have |tool_prefix| to
# use here, but that's not a problem for this use case.
if _ElfIsMainPartition(elf_path, ''):
map_path = elf_path.replace('.so', '__combined.so') + '.map'
else:
map_path = elf_path + '.map'
if not os.path.exists(map_path):
map_path += '.gz'
if not os.path.exists(map_path):
on_config_error(
'Could not find .map(.gz)? file. Ensure you have built with '
'is_official_build=true and generate_linker_map=true, or use '
          '--map-file to point to a linker map file.')
return elf_path, map_path, apk_so_path
def _DeduceAuxPaths(args, apk_prefix):
mapping_path = args.mapping_file
resources_pathmap_path = args.resources_pathmap_file
if apk_prefix:
if not mapping_path:
mapping_path = apk_prefix + '.mapping'
logging.debug('Detected --mapping-file=%s', mapping_path)
if not resources_pathmap_path:
possible_pathmap_path = apk_prefix + '.pathmap.txt'
# This could be pointing to a stale pathmap file if path shortening was
# previously enabled but is disabled for the current build. However, since
# current apk/aab will have unshortened paths, looking those paths up in
# the stale pathmap which is keyed by shortened paths would not find any
# mapping and thus should not cause any issues.
if os.path.exists(possible_pathmap_path):
resources_pathmap_path = possible_pathmap_path
logging.debug('Detected --resources-pathmap-file=%s',
resources_pathmap_path)
return mapping_path, resources_pathmap_path
def _DeduceMainPaths(args, knobs, on_config_error):
"""Generates main paths (may be deduced) for each containers given by input.
Yields:
For each container, main paths and other info needed to create size_info.
"""
output_directory_finder = path_util.OutputDirectoryFinder(
value=args.output_directory,
any_path_within_output_directory=args.any_path_within_output_directory)
def _Inner(apk_prefix, apk_path):
"""Inner helper for _DeduceMainPaths(), for one container.
Params:
apk_prefix: Prefix used to search for auxiliary .apk related files.
apk_path: Path to .apk file that can be opened for processing, but whose
filename is unimportant (e.g., can be a temp file).
"""
if apk_prefix:
# Allow either .minimal.apks or just .apks.
apk_prefix = apk_prefix.replace('.minimal.apks', '.aab')
apk_prefix = apk_prefix.replace('.apks', '.aab')
mapping_path, resources_pathmap_path = _DeduceAuxPaths(args, apk_prefix)
linker_name = None
tool_prefix = None
if knobs.analyze_native:
elf_path, map_path, apk_so_path = _DeduceNativeInfo(
output_directory_finder.Tentative(), apk_path, args.elf_file,
args.map_file, on_config_error)
if map_path:
linker_name = _DetectLinkerName(map_path)
      logging.info('Linker name: %s', linker_name)
tool_prefix_finder = path_util.ToolPrefixFinder(
value=args.tool_prefix,
output_directory_finder=output_directory_finder,
linker_name=linker_name)
tool_prefix = tool_prefix_finder.Finalized()
else:
# Trust that these values will not be used, and set to None.
elf_path = None
map_path = None
apk_so_path = None
# TODO(huangs): See if this can be pulled out of _Inner().
output_directory = None
if not args.no_source_paths:
output_directory = output_directory_finder.Finalized()
size_info_prefix = None
if output_directory and apk_prefix:
size_info_prefix = os.path.join(output_directory, 'size-info',
os.path.basename(apk_prefix))
return (output_directory, tool_prefix, apk_path, mapping_path, apk_so_path,
elf_path, map_path, resources_pathmap_path, linker_name,
size_info_prefix)
# Process each container.
# If needed, extract .apk file to a temp file and process that instead.
if args.minimal_apks_file:
with zip_util.UnzipToTemp(args.minimal_apks_file, _APKS_MAIN_APK) as temp:
yield _Inner(args.minimal_apks_file, temp)
else:
yield _Inner(args.apk_file, args.apk_file)
def Run(args, on_config_error):
if not args.size_file.endswith('.size'):
on_config_error('size_file must end with .size')
if args.f is not None:
if not _AutoIdentifyInputFile(args):
on_config_error('Cannot identify file %s' % args.f)
if args.apk_file and args.minimal_apks_file:
on_config_error('Cannot use both --apk-file and --minimal-apks-file.')
# Deduce arguments.
setattr(args, 'is_bundle', args.minimal_apks_file is not None)
any_path = (args.apk_file or args.minimal_apks_file or args.elf_file
or args.map_file)
if any_path is None:
on_config_error(
'Must pass at least one of --apk-file, --minimal-apks-file, '
'--elf-file, --map-file')
setattr(args, 'any_path_within_output_directory', any_path)
knobs = SectionSizeKnobs(args.is_bundle)
knobs.ModifyWithArgs(args)
metadata_list = []
section_sizes_list = []
raw_symbols_list = []
# Generate one size info for each container.
for (output_directory, tool_prefix, apk_path, mapping_path, apk_so_path,
elf_path, map_path, resources_pathmap_path, linker_name,
size_info_prefix) in _DeduceMainPaths(args, knobs, on_config_error):
# Note that |args.apk_file| is used instead of |apk_path|, since the latter
# may be an extracted temporary file.
metadata = CreateMetadata(map_path, elf_path, args.apk_file,
args.minimal_apks_file, tool_prefix,
output_directory, linker_name)
section_sizes, raw_symbols = CreateSectionSizesAndSymbols(
map_path=map_path,
tool_prefix=tool_prefix,
elf_path=elf_path,
apk_path=apk_path,
mapping_path=mapping_path,
output_directory=output_directory,
resources_pathmap_path=resources_pathmap_path,
track_string_literals=args.track_string_literals,
metadata=metadata,
apk_so_path=apk_so_path,
pak_files=args.pak_file,
pak_info_file=args.pak_info_file,
linker_name=linker_name,
size_info_prefix=size_info_prefix,
knobs=knobs)
metadata_list.append(metadata)
section_sizes_list.append(section_sizes)
raw_symbols_list.append(raw_symbols)
size_info = CreateSizeInfo(
section_sizes_list,
raw_symbols_list,
metadata_list,
normalize_names=False)
if logging.getLogger().isEnabledFor(logging.DEBUG):
for line in describe.DescribeSizeInfoCoverage(size_info):
logging.debug(line)
logging.info('Recorded info for %d symbols', len(size_info.raw_symbols))
logging.info('Recording metadata: \n %s', '\n '.join(
describe.DescribeMetadata(size_info.metadata)))
logging.info('Saving result to %s', args.size_file)
file_format.SaveSizeInfo(
size_info, args.size_file, include_padding=args.include_padding)
size_in_mb = os.path.getsize(args.size_file) / 1024.0 / 1024.0
logging.info('Done. File size is %.2fMiB.', size_in_mb)
| [
"[email protected]"
] | |
4216ba8538130d5be7bb47ed1e6f3ccb8612f153 | db4f69e1643b61c411fee9190a3ae8f77ee2db04 | /polyaxon/api/experiment_groups/serializers.py | 3bafa236ca3b166d8ecdadbbad365de6ba88f485 | [
"MIT"
] | permissive | gzcf/polyaxon | f159c4138fee5b1f47fb57aa6bda440fe29812fb | 77ac8838c6444a36541e6c28aba7ae42de392fee | refs/heads/master | 2021-04-18T21:22:54.269899 | 2018-08-24T09:22:22 | 2018-08-24T09:22:22 | 126,830,407 | 0 | 0 | MIT | 2018-04-20T18:07:17 | 2018-03-26T13:08:02 | Python | UTF-8 | Python | false | false | 4,125 | py | from rest_framework import fields, serializers
from rest_framework.exceptions import ValidationError
from api.utils.serializers.bookmarks import BookmarkedSerializerMixin
from db.models.experiment_groups import ExperimentGroup, ExperimentGroupStatus
from libs.spec_validation import validate_group_spec_content
class ExperimentGroupStatusSerializer(serializers.ModelSerializer):
uuid = fields.UUIDField(format='hex', read_only=True)
class Meta:
model = ExperimentGroupStatus
extra_kwargs = {'experiment_group': {'read_only': True}}
exclude = []
class ExperimentGroupSerializer(serializers.ModelSerializer):
uuid = fields.UUIDField(format='hex', read_only=True)
project = fields.SerializerMethodField()
user = fields.SerializerMethodField()
class Meta:
model = ExperimentGroup
fields = (
'id',
'uuid',
'name',
'unique_name',
'user',
'description',
'last_status',
'project',
'created_at',
'updated_at',
'started_at',
'finished_at',
'tags',
'concurrency',
'search_algorithm'
)
def get_project(self, obj):
return obj.project.unique_name
def get_user(self, obj):
return obj.user.username
class ExperimentGroupDetailSerializer(ExperimentGroupSerializer, BookmarkedSerializerMixin):
bookmarked_model = 'experimentgroup'
num_experiments = fields.SerializerMethodField()
num_pending_experiments = fields.SerializerMethodField()
num_running_experiments = fields.SerializerMethodField()
num_scheduled_experiments = fields.SerializerMethodField()
num_succeeded_experiments = fields.SerializerMethodField()
num_failed_experiments = fields.SerializerMethodField()
num_stopped_experiments = fields.SerializerMethodField()
current_iteration = fields.SerializerMethodField()
class Meta(ExperimentGroupSerializer.Meta):
fields = ExperimentGroupSerializer.Meta.fields + (
'current_iteration',
'content',
'hptuning',
'has_tensorboard',
'num_experiments',
'num_pending_experiments',
'num_running_experiments',
'num_scheduled_experiments',
'num_succeeded_experiments',
'num_failed_experiments',
'num_stopped_experiments',
'bookmarked',
)
def get_num_experiments(self, obj):
return obj.experiments__count
def get_num_pending_experiments(self, obj):
return obj.pending_experiments__count
def get_num_running_experiments(self, obj):
return obj.running_experiments__count
def get_num_scheduled_experiments(self, obj):
return obj.scheduled_experiments__count
def get_num_succeeded_experiments(self, obj):
return obj.succeeded_experiments__count
def get_num_failed_experiments(self, obj):
return obj.failed_experiments__count
def get_num_stopped_experiments(self, obj):
return obj.stopped_experiments__count
def get_current_iteration(self, obj):
return obj.iterations__count
def validate_content(self, content):
validate_group_spec_content(content)
return content
def validate(self, attrs):
if self.initial_data.get('check_specification') and not attrs.get('content'):
raise ValidationError('Experiment group expects `content`.')
return attrs
class ExperimentGroupCreateSerializer(ExperimentGroupSerializer):
class Meta(ExperimentGroupSerializer.Meta):
fields = ExperimentGroupSerializer.Meta.fields + (
'search_algorithm',
'content',
)
def validate_content(self, content):
validate_group_spec_content(content)
return content
def validate(self, attrs):
if self.initial_data.get('check_specification') and not attrs.get('content'):
raise ValidationError('Experiment group expects `content`.')
return attrs
| [
"[email protected]"
] | |
011fedfc5f4fb077adf105e66276575307bcf672 | c1edf63a93d0a6d914256e848904c374db050ae0 | /Python/python进阶知识/python多线程/多窗口运行/main1.py | 0fdebedf23162e616c2378be6f031a7f3e9cca74 | [] | no_license | clhiker/WPython | 97b53dff7e5a2b480e1bf98d1b2bf2a1742cb1cd | b21cbfe9aa4356d0fe70d5a56c8b91d41f5588a1 | refs/heads/master | 2020-03-30T03:41:50.459769 | 2018-09-28T07:36:21 | 2018-09-28T07:36:21 | 150,703,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | import time
while True:
print('I\'m main1')
time.sleep(2) | [
"[email protected]"
] | |
893dfeefb69b21a01de2a54510f145d36809b70b | 58bc54ce2f5d4beaac2366bf5b0bb76e51ebfda3 | /pytorch_toolbelt/datasets/segmentation.py | cadd5b4769c66e66c8cb6b0a8d058123710d1111 | [
"MIT"
] | permissive | anashas/pytorch-toolbelt | 1fbe76648719b2e1832e9fcbd0b2c30f134882cc | a04e28b10a43747ab75f88503ee771f89edf59fb | refs/heads/master | 2023-03-15T00:31:48.045880 | 2021-03-02T20:36:27 | 2021-03-02T20:36:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,487 | py | from functools import partial
from typing import Optional, List, Callable
import albumentations as A
import cv2
import numpy as np
from skimage.measure import block_reduce
from torch.utils.data import Dataset
from .common import (
read_image_rgb,
INPUT_IMAGE_KEY,
INPUT_IMAGE_ID_KEY,
INPUT_INDEX_KEY,
TARGET_MASK_WEIGHT_KEY,
TARGET_MASK_KEY,
name_for_stride,
UNLABELED_SAMPLE,
)
from ..utils import fs, image_to_tensor
__all__ = ["mask_to_bce_target", "mask_to_ce_target", "SegmentationDataset", "compute_weight_mask"]
def mask_to_bce_target(mask):
return image_to_tensor(mask, dummy_channels_dim=True).float()
def mask_to_ce_target(mask):
return image_to_tensor(mask, dummy_channels_dim=False).long()
def compute_weight_mask(mask: np.ndarray, edge_weight=4) -> np.ndarray:
from skimage.morphology import binary_dilation, binary_erosion
binary_mask = mask > 0
weight_mask = np.ones(mask.shape[:2]).astype(np.float32)
if binary_mask.any():
        # skimage's binary_dilation/erosion take the footprint as a positional
        # argument, and np.bool is deprecated in favor of the builtin bool.
        dilated = binary_dilation(binary_mask, np.ones((5, 5), dtype=bool))
        eroded = binary_erosion(binary_mask, np.ones((5, 5), dtype=bool))
a = dilated & ~binary_mask
b = binary_mask & ~eroded
weight_mask = (a | b).astype(np.float32) * edge_weight + 1
weight_mask = cv2.GaussianBlur(weight_mask, ksize=(5, 5), sigmaX=5)
return weight_mask
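# Editor's note: small usage sketch (not called anywhere) for compute_weight_mask.
# The toy mask is synthetic; the default edge_weight=4 from above is assumed.
def _example_compute_weight_mask():
    mask = np.zeros((64, 64), dtype=np.uint8)
    mask[16:48, 16:48] = 1  # A single square object.
    weights = compute_weight_mask(mask)
    # Weights stay near 1 far from the object and peak along its boundary.
    return weights.shape, float(weights.min()), float(weights.max())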
def _block_reduce_dominant_label(x: np.ndarray, axis):
try:
# minlength is +1 to num classes because we must account for IGNORE_LABEL
minlength = np.max(x) + 1
bincount_fn = partial(np.bincount, minlength=minlength)
counts = np.apply_along_axis(bincount_fn, -1, x.reshape((x.shape[0], x.shape[1], -1)))
reduced = np.argmax(counts, axis=-1)
return reduced
except Exception as e:
print(e)
print("shape", x.shape, "axis", axis)
def read_binary_mask(mask_fname: str) -> np.ndarray:
mask = cv2.imread(mask_fname, cv2.IMREAD_COLOR)
    # cv2.threshold returns a (retval, dst) tuple; keep only the thresholded image.
    return cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY, dst=mask)[1]
class SegmentationDataset(Dataset):
"""
Dataset class suitable for segmentation tasks
"""
def __init__(
self,
image_filenames: List[str],
mask_filenames: Optional[List[str]],
transform: A.Compose,
read_image_fn: Callable = read_image_rgb,
read_mask_fn: Callable = cv2.imread,
need_weight_mask=False,
need_supervision_masks=False,
make_mask_target_fn: Callable = mask_to_ce_target,
):
if mask_filenames is not None and len(image_filenames) != len(mask_filenames):
raise ValueError("Number of images does not corresponds to number of targets")
self.image_ids = [fs.id_from_fname(fname) for fname in image_filenames]
self.need_weight_mask = need_weight_mask
self.need_supervision_masks = need_supervision_masks
self.images = image_filenames
self.masks = mask_filenames
self.read_image = read_image_fn
self.read_mask = read_mask_fn
self.transform = transform
self.make_target = make_mask_target_fn
def __len__(self):
return len(self.images)
def set_target(self, index: int, value: np.ndarray):
mask_fname = self.masks[index]
value = (value * 255).astype(np.uint8)
cv2.imwrite(mask_fname, value)
def __getitem__(self, index):
image = self.read_image(self.images[index])
if self.masks is not None:
mask = self.read_mask(self.masks[index])
else:
mask = np.ones((image.shape[0], image.shape[1], 1), dtype=np.uint8) * UNLABELED_SAMPLE
data = self.transform(image=image, mask=mask)
image = data["image"]
mask = data["mask"]
sample = {
INPUT_INDEX_KEY: index,
INPUT_IMAGE_ID_KEY: self.image_ids[index],
INPUT_IMAGE_KEY: image_to_tensor(image),
TARGET_MASK_KEY: self.make_target(mask),
}
if self.need_weight_mask:
sample[TARGET_MASK_WEIGHT_KEY] = image_to_tensor(compute_weight_mask(mask)).float()
if self.need_supervision_masks:
for i in range(1, 5):
stride = 2 ** i
mask = block_reduce(mask, (2, 2), partial(_block_reduce_dominant_label))
sample[name_for_stride(TARGET_MASK_KEY, stride)] = self.make_target(mask)
return sample
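# Editor's note: minimal construction sketch (not called anywhere). The file
# paths are placeholders; any albumentations pipeline yielding fixed-size
# crops works here.
def _example_segmentation_dataset():
    image_filenames = ["data/images/0001.png"]  # hypothetical paths
    mask_filenames = ["data/masks/0001.png"]
    transform = A.Compose([A.Resize(256, 256), A.HorizontalFlip(p=0.5)])
    dataset = SegmentationDataset(
        image_filenames, mask_filenames, transform, need_weight_mask=True
    )
    # Wrap in torch.utils.data.DataLoader for batching as usual.
    return dataset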
| [
"[email protected]"
] | |
4508094ece806298cf2145b030d29774f438347a | e4f8b14cead542586a96bcaa75993b0a29b3c3d0 | /pyNastran/utils/log.py | 8b482bf35aacb394dae8997f2100b891fd92ef91 | [] | no_license | afcarl/cyNastran | f1d1ef5f1f7cb05f435eac53b05ff6a0cc95c19b | 356ee55dd08fdc9880c5ffba47265125cba855c4 | refs/heads/master | 2020-03-26T02:09:00.350237 | 2014-08-07T00:00:29 | 2014-08-07T00:00:29 | 144,398,645 | 1 | 0 | null | 2018-08-11T15:56:50 | 2018-08-11T15:56:50 | null | UTF-8 | Python | false | false | 5,063 | py | import sys
import platform
import os
def make_log(display=False):
"""
Creates 'pyNastran.log' file with information about working environment,
such as Python version, platform, architecture, etc. Useful for debugging.
    :param display: if True, also print the log information in addition to writing the file
"""
smsg = [("sys.version", sys.version), ("sys.version_info", sys.version_info)]
pmsg = ["machine", "platform", "processor", "architecture", "python_branch",
"python_revision", "win32_ver", "version", "uname", "system",
"python_build", "python_compiler", "python_implementation", "system",
"mac_ver", "linux_distribution", "libc_ver"]
fmt = "%-{0}s = %s\n".format(max(map(len, pmsg + [j[0] for j in smsg])))
msg = "".join([fmt % (i, str(j).replace("\n", "; ")) for (i, j) in smsg])
msg += "".join([fmt % (i, str(getattr(platform, i)())) for i in pmsg])
if display:
print(msg)
    with open('pyNastran.log', 'w') as fil:
        fil.write(msg)
def stderr_logging(typ, msg):
"""
    Default logging function. Takes a text and writes it to stdout.
    :param typ: message type
:param msg: message to be displayed
Message will have format 'typ: msg'
"""
    name = '%-8s' % (typ + ':')  # max length of 'INFO', 'DEBUG', 'WARNING', etc.
sys.stdout.write((name + msg) if typ else msg)
sys.stdout.flush()
class SimpleLogger(object):
"""
Simple logger object. In future might be changed to use Python logging module.
Two levels are supported: 'debug' and 'info'. Info level discards debug
messages, 'debug' level displays all messages.
.. note:: Logging module is currently not supported because I don't
know how to repoint the log file if the program is called a second
time. Poor logging can result in:\n
1) double logging to a single file\n
        2) all logging going to one file\n
This is really only an issue when calling logging multiple times,
such as in an optimization loop or testing.
"""
def __init__(self, level='debug', log_func=stderr_logging):
"""
:param level: level of logging: 'info' or 'debug'
:param log_func:
funtion that will be used to print log. It should take one argument:
string that is produces by a logger. Default: print messages to
stderr using @see stderr_logging function.
"""
assert level in ('info','debug')
self.level = level
self.log_func = log_func
def properties(self):
"""Return tuple: line number and filename"""
_fr = sys._getframe(3) # jump to get out of the logger code
return (_fr.f_lineno, os.path.basename(_fr.f_globals['__file__']))
def debug(self, msg):
"""
Log DEBUG message
:param msg: message to be logged
"""
if self.level != 'debug':
return
lines = str(msg).split('\n')
self.msg_typ('DEBUG', ''.join([lines[0]] + [' ' * 54 + line + '\n'
for line in lines[1:]]))
def msg_typ(self, typ, msg):
"""
Log message of a given type
:param typ: type of a message (e.g. INFO)
:param msg: message to be logged
"""
n, fn = self.properties()
self.log_func(typ, ' fname=%-25s lineNo=%-4s %s\n' % (fn, n, msg))
    def simple_msg(self, msg, typ=None):
        """
        Log a message directly without any altering.
        :param msg: message to be logged without any alteration.
"""
self.log_func(typ, msg)
def info(self, msg):
"""
Log INFO message
:param msg: message to be logged
"""
self.msg_typ("INFO", msg)
def warning(self, msg):
"""
Log WARNING message
:param msg: message to be logged
"""
self.msg_typ("WARNING", msg)
def error(self, msg):
"""
Log ERROR message
:param msg: message to be logged
"""
self.msg_typ("ERROR", msg)
def exception(self, msg):
"""
Log EXCEPTION message
:param msg: message to be logged
"""
self.msg_typ("ERROR", msg)
def critical(self, msg):
"""
Log CRITICAL message
:param msg: message to be logged
"""
self.msg_typ("CRITICAL", msg)
def get_logger(log=None, level='debug'):
"""
This function is useful as it will instantiate a simpleLogger object if log=None.
:param log: a logger object or None
:param level: level of logging: 'info' or 'debug'
"""
return SimpleLogger(level) if log is None else log
if __name__ == '__main__':
# how to use a simple logger
for nam in ["debug", "info"]:
print('--- %s logger ---' % nam)
test_log = SimpleLogger(nam)
test_log.debug('debug message')
test_log.warning('warning')
test_log.error('errors')
test_log.exception('exception')
make_log(display=True) | [
"mesheb82@abe5364a-6225-a519-111c-932ebcde5b3b"
] | mesheb82@abe5364a-6225-a519-111c-932ebcde5b3b |
18358ae17ee34ba9ba626dedeb0a03bc901ffec5 | bc526da042a8d5d2a239989efecb35fd4272e611 | /odps/ml/regression/tests/test_regression.py | 674999ee51b95dcb4d72bfbe4ee01d68c24a34df | [
"Apache-2.0"
] | permissive | forvendettaw/aliyun-odps-python-sdk | 595928fff039ae43d2736c53fc27d947def24e35 | a490a255efd0553cca4454d79ed83b777aae8888 | refs/heads/master | 2021-01-12T02:47:31.578957 | 2017-01-05T03:05:06 | 2017-01-05T03:05:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,191 | py | # encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from odps.df import DataFrame
from odps.config import options
from odps.ml.utils import TEMP_TABLE_PREFIX
from odps.ml.regression import *
from odps.ml.feature import *
from odps.ml.statistics import *
from odps.ml.tests.base import MLTestBase, tn, otm, ci_skip_case
from odps.ml.metrics import *
import logging
logger = logging.getLogger(__name__)
IONOSPHERE_TABLE = tn('pyodps_test_ml_ionosphere')
XGBOOST_OUT_TABLE = tn('pyodps_test_xgboost_out')
GBDT_OUT_TABLE = tn('pyodps_test_gbdt_out')
LINEAR_REGRESSION_OUT_TABLE = tn('pyodps_test_linear_reg_out')
LINEAR_SVR_OUT_TABLE = tn('pyodps_test_linear_svr_out')
LASSO_OUT_TABLE = tn('pyodps_test_lasso_out')
RIDGE_OUT_TABLE = tn('pyodps_test_ridge_out')
MODEL_NAME = tn('pyodps_test_out_model')
class TestMLRegression(MLTestBase):
def setUp(self):
super(TestMLRegression, self).setUp()
self.create_ionosphere(IONOSPHERE_TABLE)
options.runner.dry_run = True
def test_mock_xgboost(self):
df = DataFrame(self.odps.get_table(IONOSPHERE_TABLE)).roles(label='class')
splited = df.split(0.6)
xgboost = Xgboost()
model = xgboost.train(splited[0])._add_case(self.gen_check_params_case({
'labelColName': 'class', 'modelName': MODEL_NAME, 'colsample_bytree': '1', 'silent': '1',
'eval_metric': 'error', 'eta': '0.3', 'inputTableName': TEMP_TABLE_PREFIX + '0_split_2_1', 'max_delta_step': '0',
'base_score': '0.5', 'seed': '0', 'min_child_weight': '1', 'objective': 'reg:linear',
'featureColNames': ','.join('a%02d' % i for i in range(1, 35)),
'max_depth': '6', 'gamma': '0', 'booster': 'gbtree'}))
model.persist(MODEL_NAME)
predicted = model.predict(splited[1])._add_case(self.gen_check_params_case({
'modelName': MODEL_NAME, 'appendColNames': ','.join('a%02d' % i for i in range(1, 35)) + ',class',
'outputTableName': XGBOOST_OUT_TABLE, 'inputTableName': TEMP_TABLE_PREFIX + '0_split_2_2'}))
# persist is an operational node which will trigger execution of the flow
predicted.persist(XGBOOST_OUT_TABLE)
def test_mock_gbdt(self):
df = DataFrame(self.odps.get_table(IONOSPHERE_TABLE)).roles(label='class')
splited = df.split(0.6)
gbdt = GBDT(min_leaf_sample_count=10)
model = gbdt.train(splited[0])._add_case(self.gen_check_params_case({
'tau': '0.6', 'modelName': MODEL_NAME, 'inputTableName': TEMP_TABLE_PREFIX + '0_split_2_1', 'maxLeafCount': '32',
'shrinkage': '0.05', 'featureSplitValueMaxSize': '500', 'featureRatio': '0.6', 'testRatio': '0.0',
'newtonStep': '0', 'randSeed': '0', 'sampleRatio': '0.6', 'p': '1', 'treeCount': '500', 'metricType': '2',
'labelColName': 'class', 'featureColNames': ','.join('a%02d' % i for i in range(1, 35)),
'minLeafSampleCount': '10', 'lossType': '3', 'maxDepth': '11'}))
model.persist(MODEL_NAME)
predicted = model.predict(splited[1])._add_case(self.gen_check_params_case({
'modelName': MODEL_NAME, 'appendColNames': ','.join('a%02d' % i for i in range(1, 35)) + ',class',
'outputTableName': GBDT_OUT_TABLE, 'inputTableName': TEMP_TABLE_PREFIX + '0_split_2_2'}))
# persist is an operational node which will trigger execution of the flow
predicted.persist(GBDT_OUT_TABLE)
@ci_skip_case
def test_linear(self):
options.runner.dry_run = False
self.delete_table(LINEAR_REGRESSION_OUT_TABLE)
self.delete_offline_model(MODEL_NAME)
df = DataFrame(self.odps.get_table(IONOSPHERE_TABLE)).roles(label='class')
splited = df.split(0.6)
algo = LinearRegression()
model = algo.train(splited[0])
model.persist(MODEL_NAME)
logging.info('Importance: ', regression_importance(splited[1], model))
predicted = model.predict(splited[1])
# persist is an operational node which will trigger execution of the flow
predicted.persist(LINEAR_REGRESSION_OUT_TABLE)
logging.info('MSE: ', mean_squared_error(predicted, 'class'))
logging.info('MAE: ', mean_absolute_error(predicted, 'class'))
logging.info('HIST: ', residual_histogram(predicted, 'class'))
        logging.info('PEARSON: ', pearson(predicted, col1='class'))
| [
"[email protected]"
] | |
560210583dcd9e410b9a6d3ce7eccb94b910daaf | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_ashuwp_A.py | 2b4922b913d674d05b48e7ff31fca4b01cb5237e | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 441 | py | def sheep(num):
if num == 0:
return "INSOMNIA"
itr = tr = 0
while tr != 1023:
cnt = 0
itr += 1
temp = str(num * itr)
for k in temp:
cnt |= 1 << int(k)
tr |= cnt
return num * itr
if __name__ == "__main__":
tc = int(input())
for i in range(tc):
n = int(input())
print("Case #{}: {}".format(i + 1, sheep(n)))
| [
"[[email protected]]"
] | |
8d91d0d9ca4c29272ed50703feb949fda56ecabb | f3b233e5053e28fa95c549017bd75a30456eb50c | /bace_input/L4I/4I-3N_MD_NVT_rerun/set.py | b04b07be60fd9a603e5f9ea04342eddcacd9da7f | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | import os
dir = '/mnt/scratch/songlin3/run/bace/L4I/MD/ti_one-step/4I_3N/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_prodin = filesdir + 'temp_prod.in'
temp_pbs = filesdir + 'temp.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#prodin
prodin = workdir + "%6.5f_prod.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../4I-3N_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
] | |
3aa435659cdd66419cc6a2c5579e721ad5ecf45d | 4b3ae6048ced0d7f88a585af29fa3a7b15005749 | /Python/Django/AJAX/user_login/apps/orm_app/migrations/0001_initial.py | fed103cb60fdb32297924849629a3e26c80f3a0a | [] | no_license | ajag408/DojoAssignments | a6320856466ac21d38e8387bdcbbe2a02009e418 | 03baa0ff5261aee6ffedf724657b3a8c7cdffe47 | refs/heads/master | 2022-12-11T15:50:46.839881 | 2021-06-07T20:57:17 | 2021-06-07T20:57:17 | 79,872,914 | 0 | 0 | null | 2022-12-08T00:35:09 | 2017-01-24T02:58:15 | Python | UTF-8 | Python | false | false | 877 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-25 03:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('email_address', models.CharField(max_length=255)),
('age', models.IntegerField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"[email protected]"
] | |
a8785d9e209ad0d74353f91f65a34d7c4e5ab111 | 3e45ea5b84fdce1d1c391929e6e95c5ecbfdbf98 | /day03/app03_1/migrations/0005_animal_cat_dog.py | 293a57087d20852f2a63c1505c8803080066aa6e | [
"Apache-2.0"
] | permissive | General-Coder/Django-Introduction | 3cc75bc2098a0f90769d375aeee8f999a4f6fcc6 | e88b12682f9abc46a90a0fc79e7443537230a506 | refs/heads/master | 2020-04-05T07:34:44.540644 | 2018-11-11T14:30:12 | 2018-11-11T14:30:12 | 156,681,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,630 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-10-24 16:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app03_1', '0004_teacher_xz'),
]
operations = [
migrations.CreateModel(
name='Animal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('color', models.CharField(max_length=30)),
('gender', models.CharField(max_length=30)),
('age', models.IntegerField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Cat',
fields=[
('animal_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='app03_1.Animal')),
('type', models.CharField(max_length=20)),
],
bases=('app03_1.animal',),
),
migrations.CreateModel(
name='Dog',
fields=[
('animal_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='app03_1.Animal')),
('size', models.CharField(max_length=20)),
],
bases=('app03_1.animal',),
),
]
| [
"[email protected]"
] | |
dff4df7f2b57a2ea5b48b4b41f6928afa2de7294 | 3a9f2b3d79cf214704829427ee280f4b49dca70a | /saigon/rat/u/fm/_report_mgmt_cfg.py | 031da9c2287b3c40022bf4816bc7505478d10035 | [] | no_license | jichunwei/MyGitHub-1 | ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791 | f826fc89a030c6c4e08052d2d43af0b1b4b410e3 | refs/heads/master | 2021-01-21T10:19:22.900905 | 2016-08-20T03:34:52 | 2016-08-20T03:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,184 | py | '''
This tea to work with Reports page.
1. Reports foundation development
+ to cover those pages: Device View, Active Firmware, Historical
Connectivity, Association, Provision, Events, Speed Flex
+ to provide basic report activities:
+ fill in report options (inc. filters)
+ generate the report
+ get the report results
+ unsupported features: save reports, export reports
Examples to generate report:
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=dv_report_zd_params
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=dv_report_ap_params
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=connectivity_report_zd_params
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=connectivity_report_ap_params
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=provision_report_params
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=events_report_params
tea.py u.fm.report_mgmt fm_ip=192.168.20.252 action=generate report_param=speed_flex_report_params
Examples to create report:
Example for report option and filter options
save_cfg = dict(include_filter = True, # False
include_header = True, # False
schedule = True, # False
frequency = 'Weekly', # | 'Weekly' | 'Monthly',
day_of_week = 'Monday',
time_of_day = '3:00', # '2:00', '3:00', ...
am_pm = 'PM', # 'PM'
email_report = '[email protected]',
)
advance_cfg = dict(include_filter = True, # False
include_header = True, # False
schedule = True, # False
frequency = 'Monthly', # | 'Weekly' | 'Monthly',
day_of_month = 1,
time_of_day = '3:00', # '2:00', '3:00', ...
am_pm = 'AM', # 'PM'
email_report = '[email protected]',
)
'''
import copy
#-------------------------------------------------------------------------------
# It is too long to write these params and their values on command line.
# So define them here for generating/creating reports.
general_save_cfg = dict(
include_filter = True,
include_header = True,
time_of_day = '6:00', # '2:00', '3:00', ...
am_pm = 'AM', # 'PM'
email_report = '[email protected]',
)
save_cfg_daily_type = dict(
schedule = True,
frequency = 'Daily', # | 'Weekly' | 'Monthly',
)
save_cfg_daily_type.update(general_save_cfg)
save_cfg_weekly_type = dict(
schedule = True, # False
frequency = 'Weekly', # | 'Weekly' | 'Monthly',
day_of_week = 'Monday',
)
save_cfg_weekly_type.update(general_save_cfg)
save_cfg_monthly_type = dict(
schedule = True, # False
frequency = 'Monthly', # | 'Weekly' | 'Monthly',
day_of_month = 1,
)
save_cfg_monthly_type.update(general_save_cfg)
################################################################################
# NOTE: Currently cannot generate/create report with filters "Model Name" and
# "Connection". Bug: 15203
################################################################################
#1. params to generate a report and get its result from Report Categories
dv_report_zd_params = dict(
#action = 'generate',
report_type = 'device_view',
get_result = True,
report_options = [
'All ZoneDirectors', 'ZoneDirectors',
],
filter_options = [
['ZoneDirector Name', 'Contains', 'Ruckus'],
['Version', 'Contains', '9.0']
],
save_cfg = save_cfg_daily_type,
)
# params to create/generate zd Device View report from Saved Reports
manage_dv_report_zd_params = copy.deepcopy(dv_report_zd_params)
manage_dv_report_zd_params.update(
report_options = [
'Device View', 'All ZoneDirectors', 'ZoneDirectors',
],
)
dv_report_ap_params = dict(
#action = 'generate',
report_type = 'device_view',
get_result = True,
report_options = [
'All Standalone APs', 'Currently Connected',
],
filter_options = [
['Device Name', 'Contains', 'Ruckus'],
['Uptime', 'Greater than', '1', 'Hours']
],
save_cfg = save_cfg_weekly_type,
)
# params to create/generate ap Device View report from Saved Reports
manage_dv_report_ap_params = copy.deepcopy(dv_report_ap_params)
manage_dv_report_ap_params.update(
report_options = [
'Device View', 'All Standalone APs', 'Currently Connected',
],
)
connectivity_report_zd_params = dict(
#action = 'generate',
report_type = 'connectivity',
get_result = True,
report_options = [
'All ZoneDirectors', 'Disconnected ZoneDirectors', # 'Connected ZoneDirectors',
],
filter_options = [
['Device Last Seen', 'Earlier than', '2010-07-26', '06:00:00 AM'],
],
save_cfg = save_cfg_monthly_type,
)
manage_connectivity_report_zd_params = copy.deepcopy(connectivity_report_zd_params)
manage_connectivity_report_zd_params.update(
report_options = [
'Historical Connectivity', 'All ZoneDirectors', 'Disconnected ZoneDirectors',
],
)
connectivity_report_ap_params = dict(
#action = 'generate',
report_type = 'connectivity',
get_result = True,
report_options = [
'All Standalone APs', 'Connected',
],
filter_options = [
['Uptime', 'Greater than', 5, 'Hours'],
['Software', 'Contains', '9.0']
],
save_cfg = save_cfg_daily_type,
)
manage_connectivity_report_ap_params = copy.deepcopy(connectivity_report_ap_params)
manage_connectivity_report_ap_params.update(
report_options = [
'Historical Connectivity', 'All Standalone APs', 'Connected',
],
)
# report params for Provision report
provision_report_params = dict(
#action = 'generate',
report_type = 'provision',
get_result = True,
report_options = [
'Configuration Upgrade',
],
filter_options = [
['Created by', 'Starts with', 'admin'],
],
save_cfg = save_cfg_weekly_type,
)
manage_provision_report_params = copy.deepcopy(provision_report_params)
manage_provision_report_params.update(
report_options = [
'Provision', 'Configuration Upgrade',
],
)
# report params for Events report
events_report_params = dict(
#action = 'generate',
report_type = 'events',
get_result = True,
report_options = [
'Events', 'Standalone APs',
'Value changed due to configuration request'
],
filter_options = [
['IP Address', 'Starts with', '192.168']
],
save_cfg = save_cfg_monthly_type,
)
manage_events_report_params = copy.deepcopy(events_report_params)
manage_events_report_params.update(
report_options = [
'Events', 'Events', 'Standalone APs',
'Value changed due to configuration request'
],
)
# report params for Speed Flex report
speed_flex_report_params = dict(
#action = 'generate',
report_type = 'speed_flex',
get_result = True,
report_options = None,
filter_options = [
['Executor', 'Starts with', 'admin']
],
save_cfg = save_cfg_daily_type,
)
manage_speed_flex_report_params = copy.deepcopy(speed_flex_report_params)
manage_speed_flex_report_params.update(
report_options = [
'Speed Flex',
],
)
| [
"[email protected]"
] | |
96aa5a1461e19e2949d8c3ae4a84b8a9c7751ff7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02255/s769034459.py | 6f0767f2d12bd0d4ef95fa9cd88e9316da5284d9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | import sys
def insert_sort(array):
for i in range(0, len(array)):
v = array[i]
j = i - 1
while (j >= 0) and (array[j] > v):
array[j + 1] = array[j]
j -= 1
array[j + 1] = v
print print_list(array)
return array
def print_list(array):
s = ""
for n in array:
s += str(n) + " "
s = s.strip()
return s
if __name__ == "__main__":
array_num = int(sys.stdin.readline().strip())
array = map(lambda x: int(x), list(sys.stdin.readline().strip().split(" ")))
array = insert_sort(array) | [
"[email protected]"
] | |
9621f6de9e603b381c794751a9e39256ceb86e62 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_intermarrying.py | 448779925828137d3486fd5f49c4d3a4f0523579 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py |
#calss header
class _INTERMARRYING():
def __init__(self,):
self.name = "INTERMARRYING"
self.definitions = intermarry
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['intermarry']
| [
"[email protected]"
] | |
f3d200f5ad3c364311c5a8d6f245b9b7602b099e | 31009efe0b3882551f03dcaa9c71756c7c6f6ede | /src/main/resources/twisted/test/stdio_test_loseconn.py | 7f95a016b4a41e7ef1c45723dd2ebd0778ee341e | [
"Apache-2.0",
"ZPL-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | riyafa/autobahntestsuite-maven-plugin | b533433c75f7daea2757158de54c6d80d304a962 | 737e6dad2d3ef794f30f0a2013a77e28decd2ec4 | refs/heads/master | 2020-08-16T13:31:39.349124 | 2019-10-16T09:20:55 | 2019-10-16T09:20:55 | 215,506,990 | 0 | 0 | Apache-2.0 | 2019-10-16T09:18:34 | 2019-10-16T09:18:34 | null | UTF-8 | Python | false | false | 1,514 | py | # -*- test-case-name: twisted.test.test_stdio.StandardInputOutputTestCase.test_loseConnection -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Main program for the child process run by
L{twisted.test.test_stdio.StandardInputOutputTestCase.test_loseConnection} to
test that ITransport.loseConnection() works for process transports.
"""
import sys, _preamble
from twisted.internet.error import ConnectionDone
from twisted.internet import stdio, protocol
from twisted.python import reflect, log
class LoseConnChild(protocol.Protocol):
exitCode = 0
def connectionMade(self):
self.transport.loseConnection()
def connectionLost(self, reason):
"""
Check that C{reason} is a L{Failure} wrapping a L{ConnectionDone}
instance and stop the reactor. If C{reason} is wrong for some reason,
log something about that in C{self.errorLogFile} and make sure the
process exits with a non-zero status.
"""
try:
try:
reason.trap(ConnectionDone)
except:
log.err(None, "Problem with reason passed to connectionLost")
self.exitCode = 1
finally:
reactor.stop()
if __name__ == '__main__':
reflect.namedAny(sys.argv[1]).install()
log.startLogging(file(sys.argv[2], 'w'))
from twisted.internet import reactor
protocol = LoseConnChild()
stdio.StandardIO(protocol)
reactor.run()
sys.exit(protocol.exitCode)
| [
"[email protected]"
] | |
2f98cf3db55c973b80270b85c82ad4d83257056f | 155b6c640dc427590737750fe39542a31eda2aa4 | /api-test/xqkj/test3/testXqkj_web_finance_consumption_001_Channel.py | 7fc89567edec5d212484bde674ad99ca55324de3 | [] | no_license | RomySaber/api-test | d4b3add00e7e5ed70a5c72bb38dc010f67bbd981 | 028c9f7fe0d321db2af7f1cb936c403194db850c | refs/heads/master | 2022-10-09T18:42:43.352325 | 2020-06-11T07:00:04 | 2020-06-11T07:00:04 | 271,468,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,113 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time :2019-05-09 2:27 PM
@Author : 罗林
@File : testXqkj_web_finance_consumption_001_Channel.py
@desc : Automated test cases for the channel management workflow
"""
import json
from faker import Faker
from common.myCommon import Assertion
from common.myCommon.TestBaseCase import TestBaseCase
from xqkj.query import xqkj_query
from xqkj.testAction import Xqkj_web_finance_consumptionAction as PlatformAction
from xqkj.testAction import loginAction
global_dict = loginAction.global_dict
fake = Faker("zh_CN")
channelname = fake.company() + '渠道' + loginAction.sign
name = fake.name_male() + loginAction.sign
email = loginAction.sign + fake.email()
mobile = '15388188697'
cardnumber = fake.credit_card_number(card_type=None)
# Sub-channel
channelname_stream = channelname + '下级' + loginAction.sign
class testXqkj_web_finance_consumption_001_Channel(TestBaseCase):
def test_01_api_78dk_platform_cm_base_saveChannel(self):
        # Creating a channel succeeds
res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_saveChannel(
city="510100", name=channelname, province="510000", region="510107", shortname=channelname + '简称',
parentchanneluuid=''))
Assertion.verity(res['msg'], '成功')
Assertion.verity(res['code'], '10000')
Assertion.verityContain(res['data'], 'freezeStateName')
Assertion.verityContain(res['data'], 'openCloseStateName')
global_dict.set(channelid=res['data']['channelUuid'])
def test_02_api_78dk_platform_cm_base_legal_saveLegalPerson(self):
        # Add a legal representative
global channelid
channelid = global_dict.get('channelid')
res1 = PlatformAction.test_api_78dk_platform_cm_base_legal_saveLegalPerson(
cardnumber=cardnumber, channelormerchantuuid=channelid, legalpersonuuid='',
mobile=mobile, name=name)
Assertion.verity(json.loads(res1)['code'], '10000')
Assertion.verity(json.loads(res1)['msg'], '成功')
def test_03_api_78dk_platform_cm_base_legal_viewLegalPerson(self):
        # Query the legal representative by channel uuid
res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_legal_viewLegalPersonByChannel(channelid))
Assertion.verity(res['code'], '10000')
Assertion.verity(res['msg'], '成功')
Assertion.verityContain(res['data'], 'channelOrMerchantUuid')
Assertion.verityContain(res['data'], 'legalPersonUuid')
global legal_person_uuid
legal_person_uuid = res['data']['legalPersonUuid']
global channelOrMerchantUuid
channelOrMerchantUuid = res['data']['channelOrMerchantUuid']
def test_04_api_78dk_platform_cm_base_legal_updateLegalPerson(self):
        # Edit the legal representative
res = PlatformAction.test_api_78dk_platform_cm_base_legal_updateLegalPerson(
cardnumber=cardnumber, channelormerchantuuid=channelid, legalpersonuuid=legal_person_uuid,
mobile=mobile, name=name)
Assertion.verity(json.loads(res)['code'], '10000')
Assertion.verity(json.loads(res)['msg'], '成功')
def test_05_api_78dk_platform_cm_base_viewChannel_all(self):
        # Querying a channel fails when the uuid is empty
res = PlatformAction.test_api_78dk_platform_cm_base_viewChannel('')
Assertion.verity(json.loads(res)['msg'], '您提交的参数异常')
Assertion.verity(json.loads(res)['code'], '20000')
def test_06_api_78dk_platform_cm_base_viewChannel_not_exist(self):
        # Query a channel that does not exist
res = PlatformAction.test_api_78dk_platform_cm_base_viewChannel(str(fake.latitude()))
Assertion.verity(json.loads(res)['code'], '10000')
Assertion.verity(json.loads(res)['msg'], '成功')
def test_07_api_78dk_platform_cm_base_viewChannel_all(self):
        # Query all channels
res = PlatformAction.test_api_78dk_platform_cm_base_viewChannel(channelid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
Assertion.verity(json.loads(res)['data']['name'], channelname)
def test_08_api_78dk_platform_cm_base_viewChannels(self):
        # Query the channel list matching the given name
res = PlatformAction.test_api_78dk_platform_cm_base_viewChannels(pagecurrent=1, name=channelname, pagesize=8)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_09_api_78dk_platform_cm_base_viewChannels_all(self):
        # Full channel list
res = PlatformAction.test_api_78dk_platform_cm_base_viewChannels(pagecurrent=1, name='', pagesize=10)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
Assertion.verityNot(json.loads(res)['data']['totalCount'], 0, '数量至少大于等于1')
# def test_10_api_78dk_platform_cm_base_saveChannel_no_examine(self):
    #     # Add a sub-channel before review (should fail)
# res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_saveChannel(
# city="510100", name=channelname_stream, province="510000", region="510107",
# shortname=channelname_stream + '简称',
# parentchanneluuid=channelid))
# Assertion.verity(res['msg'], '成功')
# Assertion.verity(res['code'], '20000')
def test_11_api_78dk_platform_cm_base_updateChannel(self):
        # Edit the channel
res = PlatformAction.test_api_78dk_platform_cm_base_updateChannel(
channeluuid=channelid, city="510100", name=channelname, note='备注', province="510000", region="510107",
shortname=channelname + '简称', operatoruuid='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_12_api_78dk_platform_cm_state_updateFreezeState_freeze(self):
        # Freeze the channel
res = PlatformAction.test_api_78dk_platform_cm_state_updateFreezeState(channelid, 'freeze')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_13_api_78dk_platform_cm_state_updateFreezeState_normal(self):
        # Unfreeze the channel
res = PlatformAction.test_api_78dk_platform_cm_state_updateFreezeState(channelid, 'normal')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_14_api_78dk_platform_cm_examine_viewExamineChannels(self):
        # Channel review list
res = PlatformAction.test_api_78dk_platform_cm_examine_viewExamineChannels('123', 1, 8)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_15_api_78dk_platform_cm_examine_viewExamineChannels_all(self):
        # Full channel review list
res = PlatformAction.test_api_78dk_platform_cm_examine_viewExamineChannels(name='', pagecurrent=1, pagesize=8)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_16_api_78dk_platform_cm_examine_examine_false(self):
        # Reject the channel review
res = PlatformAction.test_api_78dk_platform_cm_examine_examine(
isadopt='false', message='不过', uid=channelid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
# def test_17_api_78dk_platform_cm_base_saveChannel_examine_false(self):
    #     # Adding a sub-channel fails after the channel review is rejected
# res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_saveChannel(
# city="510100", name=channelname_stream, province="510000", region="510107",
# shortname=channelname_stream + '简称',
# parentchanneluuid=channelid))
# Assertion.verity(res['msg'], '成功')
# Assertion.verity(res['code'], '20000')
def test_18_api_78dk_platform_cm_examine_examine_true(self):
        # Approve the channel review
xqkj_query.update_info('Tbl_ChannelProfile', 'audit_state="pending_review"',
'channel_uuid="{}"'.format(channelid))
res = PlatformAction.test_api_78dk_platform_cm_examine_examine(isadopt='true', message='通过', uid=channelid)
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_19_api_78dk_platform_cm_state_updateFreezeState_freeze(self):
        # Freeze the channel
res = PlatformAction.test_api_78dk_platform_cm_state_updateFreezeState(channelid, 'freeze')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
# def test_20_api_78dk_platform_cm_base_saveChannel_freeze(self):
    #     # Adding a sub-channel fails after the channel is frozen
# res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_saveChannel(
# city="510100", name=channelname_stream, province="510000", region="510107",
# shortname=channelname_stream + '简称',
# parentchanneluuid=channelid))
# Assertion.verity(res['msg'], '成功')
# Assertion.verity(res['code'], '20000')
def test_21_api_78dk_platform_cm_state_updateFreezeState_normal(self):
        # Unfreeze the channel
res = PlatformAction.test_api_78dk_platform_cm_state_updateFreezeState(channelid, 'normal')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_22_api_78dk_platform_cm_state_updateOpenCloseState_close(self):
        # Set the channel open/close state to close
res = PlatformAction.test_api_78dk_platform_cm_state_updateOpenCloseState(uid=channelid,
updatestate='close')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
# def test_23_api_78dk_platform_cm_base_saveChannel_state_close(self):
    #     # Adding a sub-channel fails while the channel state is close
# res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_saveChannel(
# city="510100", name=channelname_stream, province="510000", region="510107",
# shortname=channelname_stream + '简称',
# parentchanneluuid=channelid))
# Assertion.verity(res['msg'], '成功')
# Assertion.verity(res['code'], '20000')
def test_24_api_78dk_platform_cm_state_updateOpenCloseState_open(self):
        # Set the channel open/close state to open
res = PlatformAction.test_api_78dk_platform_cm_state_updateOpenCloseState(uid=channelid,
updatestate='open')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_25_api_78dk_platform_cm_base_saveChannel_city_none(self):
        # Adding a sub-channel fails when the city is empty
res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_saveChannel(
city="", name=channelname_stream, province="510000", region="510107",
shortname=channelname_stream + '简称',
parentchanneluuid=channelid))
Assertion.verity(res['msg'], '渠道所属城市不能为空,')
Assertion.verity(res['code'], '20000')
def test_26_api_78dk_platform_cm_base_saveChannel_shortname_none(self):
        # Adding a sub-channel fails when ShortName is empty
res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_saveChannel(
city="510100", name=channelname_stream, province="510000", region="510107",
shortname='',
parentchanneluuid=channelid))
Assertion.verity(res['msg'], 'ShortName不能为空!')
Assertion.verity(res['code'], '20000')
def test_27_api_78dk_platform_cm_base_saveChannel_name_none(self):
        # Adding a sub-channel fails when Name is empty
res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_saveChannel(
city="510100", name='', province="510000", region="510107",
shortname=channelname_stream + '简称',
parentchanneluuid=channelid))
Assertion.verity(res['msg'], 'Name不能为空!')
Assertion.verity(res['code'], '20000')
def test_28_api_78dk_platform_cm_base_saveChannel_province_none(self):
        # Adding a sub-channel fails when the province is empty
res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_saveChannel(
city="510100", name=channelname_stream, province="", region="510107",
shortname=channelname_stream + '简称',
parentchanneluuid=channelid))
Assertion.verity(res['msg'], '渠道所属省份不能为空,')
Assertion.verity(res['code'], '20000')
def test_29_api_78dk_platform_cm_base_saveChannel_region_none(self):
        # Adding a sub-channel fails when the region is empty
res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_saveChannel(
city="510100", name=channelname_stream, province="510000", region="",
shortname=channelname_stream + '简称',
parentchanneluuid=channelid))
Assertion.verity(res['msg'], '所属大区不能为空,')
Assertion.verity(res['code'], '20000')
def test_30_api_78dk_platform_cm_base_saveChannel(self):
        # Creating a sub-channel succeeds
res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_saveChannel(
city="510100", name=channelname_stream, province="510000", region="510107",
shortname=channelname_stream + '简称',
parentchanneluuid=channelid))
Assertion.verity(res['msg'], '成功')
Assertion.verity(res['code'], '10000')
Assertion.verityContain(res['data'], 'freezeStateName')
Assertion.verityContain(res['data'], 'openCloseStateName')
global channelid_stream
channelid_stream = res['data']['channelUuid']
# global_dict.set(channelid_stream=res['data']['channelUuid'])
def test_31_api_78dk_platform_cm_base_updateChannel(self):
# 编辑子渠道
# global channelid_stream
# channelid_stream = global_dict.get('channelid_stream')
res = PlatformAction.test_api_78dk_platform_cm_base_updateChannel(
channeluuid=channelid_stream, city="510100", name=channelname_stream, note='备注', province="510000",
region="510107", shortname=channelname_stream + '简称', operatoruuid='')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_32_api_78dk_platform_cm_state_updateFreezeState_freeze(self):
        # Freeze the sub-channel
res = PlatformAction.test_api_78dk_platform_cm_state_updateFreezeState(channelid_stream, 'freeze')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
def test_33_api_78dk_platform_cm_state_updateFreezeState_normal(self):
        # Unfreeze the sub-channel
res = PlatformAction.test_api_78dk_platform_cm_state_updateFreezeState(channelid_stream, 'normal')
Assertion.verity(json.loads(res)['msg'], '成功')
Assertion.verity(json.loads(res)['code'], '10000')
# def test_34_api_78dk_platform_cm_examine_examine_false(self):
    #     # Call the review API again on an approved channel to reject it (should fail)
# xqkj_query.update_info('Tbl_ChannelProfile', 'audit_state="pending_review"',
# 'channel_uuid="{}"'.format(channelid))
# res = PlatformAction.test_api_78dk_platform_cm_examine_examine(isadopt='false', message='不过',
# uid=channelid)
# Assertion.verity(json.loads(res)['msg'], '成功')
# Assertion.verity(json.loads(res)['code'], '20000')
def test_35_api_78dk_platform_cm_base_legal_viewLegalPerson_fial(self):
        # Query the legal representative by channel uuid
res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_legal_viewLegalPersonByChannel(''))
Assertion.verity(res['code'], '20000')
Assertion.verity(res['msg'], '您提交的参数异常')
    # # Organization-related APIs; a channel must exist first
def test_36_api_78dk_platform_cm_base_business_saveBusinessInfor(self):
        # Create an organization
res = PlatformAction.test_api_78dk_platform_cm_base_business_saveBusinessInfor(
businessaddress='天府软件园', businessaddressgpsloction='天府软件园GPS地址', businessaddresszipcode='000000',
businesshoursendtime='18:30', businesshoursstarttime='08:30', businessinformationuuid='',
businessregistrationnumber='443534534543', channelormerchantuuid=channelid, documentaddress='天府软件园',
email=email, organizationcode='567657675765', socialunifiedcreditcode='34534543534',
storerentalendtime='2019-01-12', storerentalstarttime='2018-01-12', taxregistrationnumber='34543543543',
documentprovince=510000, documentcity=510100, documentregion=510104, documentprovincename='',
documentcityname='', documentregionname='', businessprovince=510000, businesscity=510100,
businessregion=510104, businessprovincename='', businesscityname='', businessregionname='')
Assertion.verity(json.loads(res)['code'], '10000')
Assertion.verity(json.loads(res)['msg'], '成功')
def test_37_api_78dk_platform_cm_base_business_viewBusinessInforByChannel(self):
        # Query by channel id
res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_business_viewBusinessInforByChannel(channelid))
Assertion.verity(res['code'], '10000')
Assertion.verity(res['msg'], '成功')
Assertion.verityContain(res['data'], 'businessInformationUuid')
global businnessid
businnessid = res['data']['businessInformationUuid']
def test_38_api_78dk_platform_cm_base_business_updateBusinessInfor(self):
        # Edit the business organization
res = PlatformAction.test_api_78dk_platform_cm_base_business_updateBusinessInfor(
businessaddress='天府软件园', businessaddressgpsloction='天府软件园GPS地址', businessaddresszipcode='000000',
businesshoursendtime='18:30', businesshoursstarttime='08:30', businessinformationuuid=businnessid,
businessregistrationnumber='443534534543', channelormerchantuuid=channelid, documentaddress='天府软件园',
email=email, organizationcode='567657675765', socialunifiedcreditcode='34534543534',
storerentalendtime='2019-01-12', storerentalstarttime='2018-01-12', taxregistrationnumber='34543543543',
documentprovince=510000, documentcity=510100, documentregion=510104, documentprovincename='',
documentcityname='', documentregionname='', businessprovince=510000, businesscity=510100,
businessregion=510104, businessprovincename='', businesscityname='', businessregionname='')
Assertion.verity(json.loads(res)['code'], '10000')
Assertion.verity(json.loads(res)['msg'], '成功')
# def test_39_api_78dk_platform_cm_base_clear_viewClearingAccountByChannel(self):
    #     # Query the channel settlement account by channel uuid
# res = PlatformAction.test_api_78dk_platform_cm_base_clear_viewClearingAccountByChannel(channelid)
# Assertion.verity(json.loads(res)['code'], '10000')
# Assertion.verity(json.loads(res)['msg'], '成功')
# Assertion.verity(json.loads(res)['data']['channelOrMerchantUuid'], channelOrMerchantUuid)
def test_40_api_78dk_platform_mm_viewImageRoleList(self):
        # Image/document material permissions
res = PlatformAction.test_api_78dk_platform_mm_viewImageRoleList(subdivisiontype='', uid=channelid)
Assertion.verity(json.loads(res)['code'], '10000')
Assertion.verity(json.loads(res)['msg'], '成功')
def test_41_api_78dk_platform_cm_base_clear_saveClearingAccount(self):
        # Create settlement account information
res = PlatformAction.test_api_78dk_platform_cm_base_clear_saveClearingAccount(
accountname=channelname, accountnumber='6011826564542944', accountopeningbank='农业银行',
accounttype='public_accounts', branchname='支行名称', chamberlainidcard='431081199812097872',
channelormerchantuuid=channelid, city='510100', clearingaccountuuid='',
linenumber='6756765756', phone='15179366892', province='510000', region='510101')
Assertion.verity(json.loads(res)['code'], '10000')
Assertion.verity(json.loads(res)['msg'], '成功')
def test_42_api_78dk_platform_cm_base_clear_viewClearingAccountByChannel(self):
        # Query settlement information by channel id
res = json.loads(PlatformAction.test_api_78dk_platform_cm_base_clear_viewClearingAccountByChannel(channelid))
Assertion.verity(res['code'], '10000')
Assertion.verity(res['msg'], '成功')
Assertion.verityContain(res['data'], 'clearingAccountUuid')
Assertion.verity(res['data']['channelOrMerchantUuid'], channelOrMerchantUuid)
global clearing_account_uuid
clearing_account_uuid = res['data']['clearingAccountUuid']
def test_43_api_78dk_platform_cm_base_clear_updateClearingAccount(self):
        # Edit settlement account information
res = PlatformAction.test_api_78dk_platform_cm_base_clear_updateClearingAccount(
accountname=channelname, accountnumber='6011826564542944', accountopeningbank='农业银行',
accounttype='public_accounts', branchname='支行名称', chamberlainidcard='431081199812097872',
channelormerchantuuid=channelid, city='510100', clearingaccountuuid=clearing_account_uuid,
linenumber='6756765756', phone='15179366892', province='510000', region='510101')
Assertion.verity(json.loads(res)['code'], '10000')
| [
"[email protected]"
] | |
3453ef9fb376dd038b165acdf01d35326cba96a5 | ae8590dc2dd0dd6530868ccd52702d06e5d96fa1 | /copy of source code.py | b5a8ca45a3da63b6f0813916fd6af3bbdc1b8dd5 | [] | no_license | abhisek08/Python-Basics-Part-1- | e3bec8e4d7f9e484c4bcade7763842334c93f4b0 | 3687dd6ebb01f2289b3fa226cea28b564894a68f | refs/heads/master | 2022-09-08T11:42:28.871012 | 2020-05-25T07:58:01 | 2020-05-25T07:58:01 | 266,717,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | '''
Write a Python program to create a copy of its own source code.
'''
print()
print((lambda str='print(lambda str=%r: (str %% str))()': (str % str))())
print() | [
"[email protected]"
] | |
6271957d52b5f94a002352d4446b733e556860f3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03060/s903224060.py | f50e8567bb440e68db273f2b40d0b0d865cac43a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | import sys
sys.setrecursionlimit(10**6)
n = int(input())
v = list(map(int, input().split()))
c = list(map(int, input().split()))
#n, m = map(int, input().split())
#s = input()
#s,t = input().split()
#a = [int(input()) for _ in range(n)]
#
#readline = sys.stdin.readline
#n,m = [int(i) for i in readline().split()]
#ab = [[int(i) for i in readline().split()] for _ in range(n)]
ans = 0
for i,j in zip(v,c):
if i > j:
ans += i-j
print(ans) | [
"[email protected]"
] | |
b477e38610d2ed4605aaa78498aa7389b05d94b8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03105/s179203905.py | bd5dac2f36a486f1f4abf2d8cb664bb8cc4740c2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | a,b,c = map(int,input().split())
print(int(b/a) if int(b/a)<c else c) | [
"[email protected]"
] | |
706e688fdb07eda39ccfb89da8095d3584bb4bd5 | 397e125e94f4f139f2bf5055824d81f24b8b1757 | /ABC/135/D-3.py | c6a73dbe57caf4af3072b6db80e01a6523b4e3e8 | [] | no_license | tails1434/Atcoder | ecbab6ee238e3f225551297db961b1b502841fa4 | e7c7fed36be46bbaaf020a70997842240ba98d62 | refs/heads/master | 2021-07-07T00:31:49.235625 | 2020-09-30T01:42:01 | 2020-09-30T01:42:01 | 189,009,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | def main():
S = input()
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
2558d70cd897e6f36debd3e0e01e05c3f02cf98a | 5b9b1139848db270f5987d4d539c39a30115e87b | /solutions/inod.py | be970934ef14c54ddf4e4fd8782a9c1426f94dc3 | [] | no_license | mady1258/Bioinformatics_Stronghold | 3d0f82b3cff0066246eb6641368a4ea4fe366362 | 6c7daf1ea92b2a74657c9ce40a19d356177d983e | refs/heads/master | 2023-02-04T14:00:54.985085 | 2020-12-23T11:32:03 | 2020-12-23T11:32:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | import sys
if __name__ == "__main__":
'''
Given: A positive integer n (3≤n≤10000).
Return: The number of internal nodes of any unrooted binary tree having n leaves.
'''
n = int(sys.stdin.readline().rstrip())
    # In an unrooted binary tree, each of the n leaves has degree 1 and each of
    # the m internal nodes has degree 3, so the degree sum is n + 3m.  The edge
    # count is half the degree sum, and any tree on n + m nodes has n + m - 1
    # edges, so (n + 3m) / 2 = n + m - 1, i.e. n + 3m = 2n + 2m - 2, giving
    # m = n - 2.  (Check: n = 3 leaves gives m = 1 internal node.)
print(n - 2)
| [
"[email protected]"
] | |
533f6ecce51b82f53b872fc88c7b8e9ebcf7864b | fb2cc597f319380d228fc15c4008760a82203687 | /var/spack/repos/builtin/packages/e3sm-kernels/package.py | 2e8534ee60c1371305fb8b850092d450a78a511f | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LGPL-2.1-only"
] | permissive | JayjeetAtGithub/spack | c41b5debcbe139abb2eab626210505b7f930d637 | 6c2df00443a2cd092446c7d84431ae37e64e4296 | refs/heads/develop | 2023-03-21T02:35:58.391230 | 2022-10-08T22:57:45 | 2022-10-08T22:57:45 | 205,764,532 | 0 | 0 | MIT | 2019-09-02T02:44:48 | 2019-09-02T02:44:47 | null | UTF-8 | Python | false | false | 1,887 | py | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.package import *
class E3smKernels(MakefilePackage):
"""
Climate kernels for Co-design that originate from the Energy
Exascale Earth System Model (E3SM).
"""
homepage = "https://github.com/e3SM-Project/codesign-kernels"
url = "https://github.com/E3SM-Project/codesign-kernels/archive/refs/tags/v1.0.tar.gz"
git = "https://github.com/E3SM-Project/codesign-kernels.git"
maintainers = ["sarats", "philipwjones"]
version("master", branch="master")
version("1.0", sha256="358249785ba9f95616feecbb6f37f7694646568499c11b2094c9233999c6cc95")
variant(
"kernel",
default="atmosphere",
values=(
"atmosphere",
"mmf-mpdata-tracer",
),
description="Specify E3SM Kernel to Build",
multi=False,
)
@property
def build_directory(self):
return self.spec.variants["kernel"].value
@property
def build_targets(self):
# Spack will provide optimization flags
# But we still need to pass in fortran flags for gfortran
args = []
# Test for gfortran specifically due to hybrid compilers like llvm
if "gfortran" in self.compiler.fc:
args.append("FFLAGS=-ffree-line-length-none")
return args
def install(self, spec, prefix):
# Manually copy binaries over
mkdir(prefix.bin)
if self.spec.variants["kernel"].value == "atmosphere":
install(os.path.join("atmosphere", "atm"), prefix.bin.atm)
elif self.spec.variants["kernel"].value == "mmf-mpdata-tracer":
install(os.path.join("mmf-mpdata-tracer", "advect"), prefix.bin.advect)
| [
"[email protected]"
] | |
d8534409bd889016971a612a14dde9520fab2066 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/3491924/snippet.py | 206562a28b41564df1d60ab42576e7b4bb1dd96a | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 860 | py | import os
from urlparse import urlparse
from flask import Flask
from pymongo import MongoClient
MONGO_URL = os.environ.get('MONGOHQ_URL')
if MONGO_URL:
# Get client
client = MongoClient(MONGO_URL)
# Get database
db = client[urlparse(MONGO_URL).path[1:]]
else:
# Not on an app with the MongoHQ add-on, do some localhost action
client = MongoClient('localhost', 27017)
db = client['MyDB']
app = Flask(__name__)
app.debug = True
@app.route('/')
def hello():
myObj = db.analytics.find_one({'event':'page_views'})
if not myObj:
myObj = {'event':'page_views', 'count':1}
else:
myObj['count'] += 1
db.analytics.save(myObj)
return 'Hello World! ' + str(myObj['count'])
if __name__ == '__main__':
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port) | [
"[email protected]"
] | |
7eae6423802f038e65f587ba7edb68bb345f425b | 7a7a0663efd2c25adf26f6552e3c4e95e9ac4e63 | /holon/models/rmm.py | b27372b02be9352ffcb461a93b9dea68933d012b | [] | no_license | smizell/holon | 2c5654094cb007a9fceae621630126d9173c4f2c | 9cdf39b74cee31ed9c84c94b792814f0b9fc6483 | refs/heads/main | 2023-02-03T15:46:41.059117 | 2020-12-09T23:10:39 | 2020-12-09T23:10:39 | 317,355,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | from holon.principles import Principle
level0 = Principle(
name="RMM Level 0",
reference="https://martinfowler.com/articles/richardsonMaturityModel.html#level0",
)
level1 = Principle(
name="RMM Level 1",
reference="https://martinfowler.com/articles/richardsonMaturityModel.html#level1",
)
level2 = Principle(
name="RMM Level 2",
reference="https://martinfowler.com/articles/richardsonMaturityModel.html#level2",
)
level3 = Principle(
name="RMM Level 3",
reference="https://martinfowler.com/articles/richardsonMaturityModel.html#level3",
)
| [
"[email protected]"
] | |
cbad53e84d05323b976ae9aca6f270b8cf6f450f | c5a6e19e7ce45c565e0353b8d6bd768a1c573901 | /catalyst/data/minute_bars.py | 243de701ad10bc72068c3659a1dde48d41fe6a2d | [
"Apache-2.0"
] | permissive | guilhermeprokisch/catalyst | d58b3de83df636209f96fda0587330da1ee7c79b | 21e096b261912d9e905584178d6ee626072c23cb | refs/heads/master | 2020-08-02T11:37:22.125602 | 2019-09-27T14:28:32 | 2019-09-27T14:28:32 | 211,336,899 | 0 | 0 | Apache-2.0 | 2019-09-27T14:25:22 | 2019-09-27T14:25:21 | null | UTF-8 | Python | false | false | 48,928 | py | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
import json
import os
from glob import glob
from os.path import join
from textwrap import dedent
from lru import LRU
import bcolz
from bcolz import ctable
from intervaltree import IntervalTree
import logbook
import numpy as np
import pandas as pd
from pandas import HDFStore
import tables
from six import with_metaclass
from toolz import keymap, valmap
from catalyst.data._minute_bar_internal import (
minute_value,
find_position_of_minute,
find_last_traded_position_internal
)
from catalyst.gens.sim_engine import NANOS_IN_MINUTE
from catalyst.data.bar_reader import BarReader, NoDataOnDate
from catalyst.data.us_equity_pricing import check_uint64_safe
from catalyst.utils.calendars import get_calendar
from catalyst.utils.cli import maybe_show_progress
from catalyst.utils.memoize import lazyval
from catalyst.constants import LOG_LEVEL
logger = logbook.Logger('MinuteBars', level=LOG_LEVEL)
US_EQUITIES_MINUTES_PER_DAY = 390
FUTURES_MINUTES_PER_DAY = 1440
DEFAULT_EXPECTEDLEN = US_EQUITIES_MINUTES_PER_DAY * 252 * 15
OHLC_RATIO = 100000000
class BcolzMinuteOverlappingData(Exception):
pass
class BcolzMinuteWriterColumnMismatch(Exception):
pass
class MinuteBarReader(BarReader):
@property
def data_frequency(self):
return "minute"
def _calc_minute_index(market_opens, minutes_per_day):
minutes = np.zeros(len(market_opens) * minutes_per_day,
dtype='datetime64[ns]')
deltas = np.arange(0, minutes_per_day, dtype='timedelta64[m]')
for i, market_open in enumerate(market_opens):
start = market_open.asm8
minute_values = start + deltas
start_ix = minutes_per_day * i
end_ix = start_ix + minutes_per_day
minutes[start_ix:end_ix] = minute_values
return pd.to_datetime(minutes, utc=True, box=True)
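# Illustrative result (a sketch; the open time is borrowed from the example in
# the BcolzMinuteBarWriter docstring below): with minutes_per_day=390 and a
# market open of 2016-01-19 14:31 UTC, the block generated for that session is
# 14:31, 14:32, ..., 21:00 UTC, i.e. market_open + 0..389 minutes.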
def _sid_subdir_path(sid):
"""
    Format the subdir path to limit the number of directories in any given
    subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
"""
padded_sid = format(sid, '06')
return os.path.join(
# subdir 1 00/XX
padded_sid[0:2],
# subdir 2 XX/00
padded_sid[2:4],
"{0}.bcolz".format(str(padded_sid))
)
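# Illustrative mappings (a sketch): _sid_subdir_path(1) returns
# '00/00/000001.bcolz' and _sid_subdir_path(123456) returns
# '12/34/123456.bcolz'; the two 2-character prefixes keep any single
# directory to at most 100 entries.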
def convert_cols(cols, scale_factor, sid, invalid_data_behavior):
"""Adapt OHLCV columns into uint64 columns.
Parameters
----------
cols : dict
A dict mapping each column name (open, high, low, close, volume)
to a float column to convert to uint64.
scale_factor : int
Factor to use to scale float values before converting to uint64.
sid : int
Sid of the relevant asset, for logging.
invalid_data_behavior : str
        Specifies behavior when data cannot be converted to uint64.
If 'raise', raises an exception.
If 'warn', logs a warning and filters out incompatible values.
If 'ignore', silently filters out incompatible values.
"""
scaled_opens = np.nan_to_num(cols['open']) * scale_factor
scaled_highs = np.nan_to_num(cols['high']) * scale_factor
scaled_lows = np.nan_to_num(cols['low']) * scale_factor
scaled_closes = np.nan_to_num(cols['close']) * scale_factor
scaled_volumes = np.nan_to_num(cols['volume']) * scale_factor
exclude_mask = np.zeros_like(scaled_opens, dtype=bool)
for col_name, scaled_col in [
('open', scaled_opens),
('high', scaled_highs),
('low', scaled_lows),
('close', scaled_closes),
('volume', scaled_volumes),
]:
max_val = scaled_col.max()
try:
check_uint64_safe(max_val, col_name)
except ValueError:
if invalid_data_behavior == 'raise':
raise
if invalid_data_behavior == 'warn':
logger.warn(
'Values for sid={}, col={} contain some too large for '
'uint64 (max={}), filtering them out',
sid, col_name, max_val,
)
            # We want to exclude all rows that have an unsafe value in
            # this column; accumulate the per-column masks with a logical OR
            # so that a row flagged by any column stays excluded.
            exclude_mask |= (scaled_col >= np.iinfo(np.uint64).max)
    # Convert all cols to uint64.
opens = scaled_opens.astype(np.uint64)
highs = scaled_highs.astype(np.uint64)
lows = scaled_lows.astype(np.uint64)
closes = scaled_closes.astype(np.uint64)
volumes = scaled_volumes.astype(np.uint64)
# Exclude rows with unsafe values by setting to zero.
opens[exclude_mask] = 0
highs[exclude_mask] = 0
lows[exclude_mask] = 0
closes[exclude_mask] = 0
volumes[exclude_mask] = 0
return opens, highs, lows, closes, volumes
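# Illustrative sketch (not part of the original module): with the default
# OHLC_RATIO (10**8) as the scale_factor, a quoted value of 1.25 is stored as
# the uint64 value 125000000, e.g.
#
#   cols = {k: np.array([1.25])
#           for k in ('open', 'high', 'low', 'close', 'volume')}
#   convert_cols(cols, OHLC_RATIO, sid=0, invalid_data_behavior='warn')
#   # -> five uint64 arrays, each holding 125000000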
class BcolzMinuteBarMetadata(object):
"""
Parameters
----------
ohlc_ratio : int
The factor by which the pricing data is multiplied so that the
float data can be stored as an integer.
calendar : catalyst.utils.calendars.trading_calendar.TradingCalendar
The TradingCalendar on which the minute bars are based.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
minutes_per_day : int
The number of minutes per each period.
"""
FORMAT_VERSION = 3
METADATA_FILENAME = 'metadata.json'
@classmethod
def metadata_path(cls, rootdir):
return os.path.join(rootdir, cls.METADATA_FILENAME)
@classmethod
def read(cls, rootdir):
path = cls.metadata_path(rootdir)
with open(path) as fp:
raw_data = json.load(fp)
try:
version = raw_data['version']
except KeyError:
# Version was first written with version 1, assume 0,
# if version does not match.
version = 0
default_ohlc_ratio = raw_data['ohlc_ratio']
if version >= 1:
minutes_per_day = raw_data['minutes_per_day']
else:
# version 0 always assumed US equities.
minutes_per_day = US_EQUITIES_MINUTES_PER_DAY
if version >= 2:
calendar = get_calendar(raw_data['calendar_name'])
start_session = pd.Timestamp(
raw_data['start_session'], tz='UTC')
end_session = pd.Timestamp(raw_data['end_session'], tz='UTC')
else:
# No calendar info included in older versions, so
# default to NYSE.
calendar = get_calendar('NYSE')
start_session = pd.Timestamp(
raw_data['first_trading_day'], tz='UTC')
end_session = calendar.minute_to_session_label(
pd.Timestamp(
raw_data['market_closes'][-1], unit='m', tz='UTC')
)
if version >= 3:
ohlc_ratios_per_sid = raw_data['ohlc_ratios_per_sid']
if ohlc_ratios_per_sid is not None:
ohlc_ratios_per_sid = keymap(int, ohlc_ratios_per_sid)
else:
ohlc_ratios_per_sid = None
return cls(
default_ohlc_ratio,
ohlc_ratios_per_sid,
calendar,
start_session,
end_session,
minutes_per_day,
version=version,
)
def __init__(
self,
default_ohlc_ratio,
ohlc_ratios_per_sid,
calendar,
start_session,
end_session,
minutes_per_day,
version=FORMAT_VERSION,
):
self.calendar = calendar
self.start_session = start_session
self.end_session = end_session
self.default_ohlc_ratio = default_ohlc_ratio
self.ohlc_ratios_per_sid = ohlc_ratios_per_sid
self.minutes_per_day = minutes_per_day
self.version = version
def write(self, rootdir):
"""
Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
version : int
The value of FORMAT_VERSION of this class.
ohlc_ratio : int
            The default ratio by which to multiply the pricing data to
            convert it from floats to integers that fit within
            np.uint64. If ohlc_ratios_per_sid is None or does not
contain a mapping for a given sid, this ratio is used.
ohlc_ratios_per_sid : dict
A dict mapping each sid in the output to the factor by
which the pricing data is multiplied so that the float data
can be stored as an integer.
minutes_per_day : int
The number of minutes per each period.
calendar_name : str
The name of the TradingCalendar on which the minute bars are
based.
start_session : datetime
'YYYY-MM-DD' formatted representation of the first trading
session in the data set.
end_session : datetime
'YYYY-MM-DD' formatted representation of the last trading
session in the data set.
Deprecated, but included for backwards compatibility:
first_trading_day : string
'YYYY-MM-DD' formatted representation of the first trading day
available in the dataset.
market_opens : list
List of int64 values representing UTC market opens as
minutes since epoch.
market_closes : list
List of int64 values representing UTC market closes as
minutes since epoch.
"""
calendar = self.calendar
slicer = calendar.schedule.index.slice_indexer(
self.start_session,
self.end_session,
)
schedule = calendar.schedule[slicer]
market_opens = schedule.market_open
market_closes = schedule.market_close
metadata = {
'version': self.version,
'ohlc_ratio': self.default_ohlc_ratio,
'ohlc_ratios_per_sid': self.ohlc_ratios_per_sid,
'minutes_per_day': self.minutes_per_day,
'calendar_name': self.calendar.name,
'start_session': str(self.start_session.date()),
'end_session': str(self.end_session.date()),
# Write these values for backwards compatibility
'first_trading_day': str(self.start_session.date()),
'market_opens': (market_opens.values.astype('datetime64[m]').
astype(np.int64).tolist()),
'market_closes': (market_closes.values.astype('datetime64[m]').
astype(np.int64).tolist()),
}
with open(self.metadata_path(rootdir), 'w+') as fp:
json.dump(metadata, fp)
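# Illustrative shape of the metadata.json produced by `write` above (all
# values are made up; the key set mirrors the dict constructed in the method):
#
#   {"version": 3, "ohlc_ratio": 100000000, "ohlc_ratios_per_sid": null,
#    "minutes_per_day": 390, "calendar_name": "NYSE",
#    "start_session": "2016-01-19", "end_session": "2016-01-20",
#    "first_trading_day": "2016-01-19",
#    "market_opens": [...], "market_closes": [...]}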
class BcolzMinuteBarWriter(object):
"""
Class capable of writing minute OHLCV data to disk into bcolz format.
Parameters
----------
rootdir : string
Path to the root directory into which to write the metadata and
bcolz subdirectories.
calendar : catalyst.utils.calendars.trading_calendar.TradingCalendar
The trading calendar on which to base the minute bars. Used to
get the market opens used as a starting point for each periodic
span of minutes in the index, and the market closes that
correspond with the market opens.
minutes_per_day : int
        The number of minutes per each period, e.g. 390, the mode of
        minutes in NYSE trading days.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
default_ohlc_ratio : int, optional
The default ratio by which to multiply the pricing data to
convert from floats to integers that fit within np.uint64. If
ohlc_ratios_per_sid is None or does not contain a mapping for a
given sid, this ratio is used. Default is OHLC_RATIO (10^8).
ohlc_ratios_per_sid : dict, optional
A dict mapping each sid in the output to the ratio by which to
        multiply the pricing data to convert it from floats to integers
        that fit within np.uint64.
expectedlen : int, optional
The expected length of the dataset, used when creating the initial
bcolz ctable.
If the expectedlen is not used, the chunksize and corresponding
compression ratios are not ideal.
Defaults to supporting 15 years of NYSE equity market data.
see: http://bcolz.blosc.org/opt-tips.html#informing-about-the-length-of-your-carrays # noqa
write_metadata : bool, optional
If True, writes the minute bar metadata (on init of the writer).
If False, no metadata is written (existing metadata is
retained). Default is True.
Notes
-----
Writes a bcolz directory for each individual sid, all contained within
a root directory which also contains metadata about the entire dataset.
Each individual asset's data is stored as a bcolz table with a column for
each pricing field: (open, high, low, close, volume)
The open, high, low, close and volume columns are integers which are 10^8 times
    the quoted price, so that the data can be represented and stored as an
np.uint64, supporting market prices quoted up to the 1/10^8-th place.
    The 'index' for each individual asset is a repeating period of minutes of
length `minutes_per_day` starting from each market open.
The file format does not account for half-days.
e.g.:
2016-01-19 14:31
2016-01-19 14:32
...
2016-01-19 20:59
2016-01-19 21:00
2016-01-20 14:31
2016-01-20 14:32
...
2016-01-20 20:59
2016-01-20 21:00
All assets are written with a common 'index', sharing a common first
trading day. Assets that do not begin trading until after the first trading
    day will have zeros for all pricing data up until data is traded.
'index' is in quotations, because bcolz does not provide an index. The
format allows index-like behavior by writing each minute's data into the
corresponding position of the enumeration of the aforementioned datetime
index.
The datetimes which correspond to each position are written in the metadata
as integer nanoseconds since the epoch into the `minute_index` key.
See Also
--------
catalyst.data.minute_bars.BcolzMinuteBarReader
"""
COL_NAMES = ('open', 'high', 'low', 'close', 'volume')
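    # Illustrative arithmetic for the Notes above (example values only, not
    # taken from any dataset): with the documented 10^8 ratio, a close price
    # of 123.456789 is stored as 123.456789 * 10**8 = 12345678900, which fits
    # in np.uint64; readers recover the float by multiplying by 1 / 10**8.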
def __init__(self,
rootdir,
calendar,
start_session,
end_session,
minutes_per_day,
default_ohlc_ratio=OHLC_RATIO,
ohlc_ratios_per_sid=None,
expectedlen=DEFAULT_EXPECTEDLEN,
write_metadata=True):
self._rootdir = rootdir
self._start_session = start_session
self._end_session = end_session
self._calendar = calendar
slicer = (
calendar.schedule.index.slice_indexer(start_session, end_session))
self._schedule = calendar.schedule[slicer]
self._session_labels = self._schedule.index
self._minutes_per_day = minutes_per_day
self._expectedlen = expectedlen
self._default_ohlc_ratio = default_ohlc_ratio
self._ohlc_ratios_per_sid = ohlc_ratios_per_sid
self._minute_index = _calc_minute_index(
self._schedule.market_open, self._minutes_per_day)
if write_metadata:
metadata = BcolzMinuteBarMetadata(
self._default_ohlc_ratio,
self._ohlc_ratios_per_sid,
self._calendar,
self._start_session,
self._end_session,
self._minutes_per_day,
)
metadata.write(self._rootdir)
@classmethod
def open(cls, rootdir, end_session=None):
"""
Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``.
"""
metadata = BcolzMinuteBarMetadata.read(rootdir)
return BcolzMinuteBarWriter(
rootdir,
metadata.calendar,
metadata.start_session,
end_session if end_session is not None else metadata.end_session,
metadata.minutes_per_day,
metadata.default_ohlc_ratio,
metadata.ohlc_ratios_per_sid,
write_metadata=end_session is not None
)
@property
def first_trading_day(self):
return self._start_session
def ohlc_ratio_for_sid(self, sid):
if self._ohlc_ratios_per_sid is not None:
try:
return self._ohlc_ratios_per_sid[sid]
except KeyError:
pass
# If no ohlc_ratios_per_sid dict is passed, or if the specified
# sid is not in the dict, fallback to the general ohlc_ratio.
return self._default_ohlc_ratio
def sidpath(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
Full path to the bcolz rootdir for the given sid.
"""
sid_subdir = _sid_subdir_path(sid)
return join(self._rootdir, sid_subdir)
def last_date_in_output_for_sid(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : pd.Timestamp
The midnight of the last date written in to the output for the
given sid.
"""
sizes_path = "{0}/close/meta/sizes".format(self.sidpath(sid))
if not os.path.exists(sizes_path):
return pd.NaT
with open(sizes_path, mode='r') as f:
sizes = f.read()
data = json.loads(sizes)
# use integer division so that the result is an int
# for pandas index later https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/base.py#L247 # noqa
num_days = data['shape'][0] // self._minutes_per_day
if num_days == 0:
# empty container
return pd.NaT
return self._session_labels[num_days - 1]
def _init_ctable(self, path):
"""
Create empty ctable for given path.
Parameters
----------
path : string
The path to rootdir of the new ctable.
"""
# Only create the containing subdir on creation.
# This is not to be confused with the `.bcolz` directory, but is the
# directory up one level from the `.bcolz` directories.
sid_containing_dirname = os.path.dirname(path)
if not os.path.exists(sid_containing_dirname):
# Other sids may have already created the containing directory.
os.makedirs(sid_containing_dirname)
initial_array = np.empty(0, np.uint64)
table = ctable(
rootdir=path,
columns=[
initial_array,
initial_array,
initial_array,
initial_array,
initial_array,
],
names=[
'open',
'high',
'low',
'close',
'volume'
],
expectedlen=self._expectedlen,
mode='w',
)
table.flush()
return table
def _ensure_ctable(self, sid):
"""Ensure that a ctable exists for ``sid``, then return it."""
sidpath = self.sidpath(sid)
if not os.path.exists(sidpath):
return self._init_ctable(sidpath)
return bcolz.ctable(rootdir=sidpath, mode='a')
def _zerofill(self, table, numdays):
# Compute the number of minutes to be filled, accounting for the
# possibility of a partial day's worth of minutes existing for
# the previous day.
minute_offset = len(table) % self._minutes_per_day
num_to_prepend = numdays * self._minutes_per_day - minute_offset
prepend_array = np.zeros(num_to_prepend, np.uint64)
# Fill all OHLCV with zeros.
table.append([prepend_array] * 5)
table.flush()
def pad(self, sid, date):
"""
Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
        including the specified date) will be padded with `minutes_per_day`
        worth of zeros.
Parameters
----------
sid : int
The asset identifier for the data being written.
date : datetime-like
            The date used to calculate how many slots to pad.
The padding is done through the date, i.e. after the padding is
done the `last_date_in_output_for_sid` will be equal to `date`
"""
table = self._ensure_ctable(sid)
last_date = self.last_date_in_output_for_sid(sid)
tds = self._session_labels
if date <= last_date or date < tds[0]:
# No need to pad.
return
if last_date == pd.NaT:
# If there is no data, determine how many days to add so that
# desired days are written to the correct slots.
days_to_zerofill = tds[tds.slice_indexer(end=date)]
else:
days_to_zerofill = tds[tds.slice_indexer(
start=last_date + tds.freq,
end=date)]
self._zerofill(table, len(days_to_zerofill))
new_last_date = self.last_date_in_output_for_sid(sid)
assert new_last_date == date, "new_last_date={0} != date={1}".format(
new_last_date, date)
def set_sid_attrs(self, sid, **kwargs):
"""Write all the supplied kwargs as attributes of the sid's file.
"""
table = self._ensure_ctable(sid)
for k, v in kwargs.items():
table.attrs[k] = v
def write(self, data, show_progress=False, invalid_data_behavior='warn'):
"""Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
"""
ctx = maybe_show_progress(
data,
show_progress=show_progress,
item_show_func=lambda e: e if e is None else str(e[0]),
label="Merging minute equity files:",
)
write_sid = self.write_sid
with ctx as it:
for e in it:
write_sid(*e, invalid_data_behavior=invalid_data_behavior)
def write_sid(self, sid, df, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
        If the bcolz ctable does not already extend exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
            The asset identifier for the data being written.
df : pd.DataFrame
DataFrame of market data with the following characteristics.
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
"""
cols = {
'open': df.open.values,
'high': df.high.values,
'low': df.low.values,
'close': df.close.values,
'volume': df.volume.values,
}
dts = df.index.values
# Call internal method, since DataFrame has already ensured matching
# index and value lengths.
self._write_cols(sid, dts, cols, invalid_data_behavior)
def write_cols(self, sid, dts, cols, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
        If the bcolz ctable does not already extend exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES):
raise BcolzMinuteWriterColumnMismatch(
"Length of dts={0} should match cols: {1}".format(
len(dts),
" ".join("{0}={1}".format(name, len(cols[name]))
for name in self.COL_NAMES)))
self._write_cols(sid, dts, cols, invalid_data_behavior)
def _write_cols(self, sid, dts, cols, invalid_data_behavior):
"""
Internal method for `write_cols` and `write`.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
table = self._ensure_ctable(sid)
tds = self._session_labels
input_first_day = self._calendar.minute_to_session_label(
pd.Timestamp(dts[0]), direction='previous')
last_date = self.last_date_in_output_for_sid(sid)
day_before_input = input_first_day - tds.freq
self.pad(sid, day_before_input)
table = self._ensure_ctable(sid)
# Get the number of minutes already recorded in this sid's ctable
num_rec_mins = table.size
all_minutes = self._minute_index
# Get the latest minute we wish to write to the ctable
last_minute_to_write = pd.Timestamp(dts[-1], tz='UTC')
# In the event that we've already written some minutely data to the
# ctable, guard against overwriting that data.
if num_rec_mins > 0:
last_recorded_minute = all_minutes[num_rec_mins - 1]
if last_minute_to_write <= last_recorded_minute:
raise BcolzMinuteOverlappingData(dedent("""
Data with last_date={0} already includes input start={1} for
sid={2}""".strip()).format(last_date, input_first_day, sid))
latest_min_count = all_minutes.get_loc(last_minute_to_write)
# Get all the minutes we wish to write (all market minutes after the
# latest currently written, up to and including last_minute_to_write)
all_minutes_in_window = all_minutes[num_rec_mins:latest_min_count + 1]
minutes_count = all_minutes_in_window.size
open_col = np.zeros(minutes_count, dtype=np.uint64)
high_col = np.zeros(minutes_count, dtype=np.uint64)
low_col = np.zeros(minutes_count, dtype=np.uint64)
close_col = np.zeros(minutes_count, dtype=np.uint64)
vol_col = np.zeros(minutes_count, dtype=np.uint64)
dt_ixs = np.searchsorted(all_minutes_in_window.values,
dts.astype('datetime64[ns]'))
ohlc_ratio = self.ohlc_ratio_for_sid(sid)
(
open_col[dt_ixs],
high_col[dt_ixs],
low_col[dt_ixs],
close_col[dt_ixs],
vol_col[dt_ixs],
) = convert_cols(cols, ohlc_ratio, sid, invalid_data_behavior)
table.append([
open_col,
high_col,
low_col,
close_col,
vol_col
])
table.flush()
def data_len_for_day(self, day):
"""
Return the number of data points up to and including the
provided day.
"""
day_ix = self._session_labels.get_loc(day)
# Add one to the 0-indexed day_ix to get the number of days.
num_days = day_ix + 1
return num_days * self._minutes_per_day
def truncate(self, date):
"""Truncate data beyond this date in all ctables."""
truncate_slice_end = self.data_len_for_day(date)
glob_path = os.path.join(self._rootdir, "*", "*", "*.bcolz")
sid_paths = sorted(glob(glob_path))
for sid_path in sid_paths:
file_name = os.path.basename(sid_path)
try:
table = bcolz.open(rootdir=sid_path)
except IOError:
continue
if table.len <= truncate_slice_end:
logger.info("{0} not past truncate date={1}.", file_name, date)
continue
logger.info(
"Truncating {0} at end_date={1}", file_name, date.date()
)
table.resize(truncate_slice_end)
# Update end session in metadata.
metadata = BcolzMinuteBarMetadata.read(self._rootdir)
metadata.end_session = date
metadata.write(self._rootdir)
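def _example_write_minutes(writer, sid, ohlcv_df):
    """
    Illustrative sketch only: the helper name and its arguments are
    placeholders, not part of the original module. It shows how one asset's
    minute OHLCV frame would be handed to a BcolzMinuteBarWriter, per the
    ``write``/``write_sid`` docstrings above.
    """
    # ``write`` expects an iterable of (sid, DataFrame) pairs whose columns
    # are open/high/low/close/volume, indexed by market minutes.
    writer.write([(sid, ohlcv_df)], show_progress=False)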
class BcolzMinuteBarReader(MinuteBarReader):
"""
Reader for data written by BcolzMinuteBarWriter
Parameters
----------
rootdir : string
The root directory containing the metadata and asset bcolz
directories.
See Also
--------
catalyst.data.minute_bars.BcolzMinuteBarWriter
"""
FIELDS = ('open', 'high', 'low', 'close', 'volume')
def __init__(self, rootdir, sid_cache_size=1000):
self._rootdir = rootdir
metadata = self._get_metadata()
self._start_session = metadata.start_session
self._end_session = metadata.end_session
self.calendar = metadata.calendar
slicer = self.calendar.schedule.index.slice_indexer(
self._start_session,
self._end_session,
)
self._schedule = self.calendar.schedule[slicer]
self._market_opens = self._schedule.market_open
self._market_open_values = self._market_opens.values. \
astype('datetime64[m]').astype(np.int64)
self._market_closes = self._schedule.market_close
self._market_close_values = self._market_closes.values. \
astype('datetime64[m]').astype(np.int64)
self._default_ohlc_inverse = 1.0 / metadata.default_ohlc_ratio
ohlc_ratios = metadata.ohlc_ratios_per_sid
if ohlc_ratios:
self._ohlc_inverses_per_sid = (
valmap(lambda x: 1.0 / x, ohlc_ratios))
else:
self._ohlc_inverses_per_sid = None
self._minutes_per_day = metadata.minutes_per_day
self._carrays = {
field: LRU(sid_cache_size)
for field in self.FIELDS
}
self._last_get_value_dt_position = None
self._last_get_value_dt_value = None
# This is to avoid any bad data or other performance-killing situation
        # where there is a consecutive streak of 0 (no volume) starting at an
# asset's start date.
# if asset 1 started on 2015-01-03 but its first trade is 2015-01-06
# 10:31 AM US/Eastern, this dict would store {1: 23675971},
# which is the minute epoch of that date.
self._known_zero_volume_dict = {}
def _get_metadata(self):
return BcolzMinuteBarMetadata.read(self._rootdir)
@property
def trading_calendar(self):
return self.calendar
@lazyval
def last_available_dt(self):
_, close = self.calendar.open_and_close_for_session(self._end_session)
return close
@property
def first_trading_day(self):
return self._start_session
def _ohlc_ratio_inverse_for_sid(self, sid):
if self._ohlc_inverses_per_sid is not None:
try:
return self._ohlc_inverses_per_sid[sid]
except KeyError:
pass
# If we can not get a sid-specific OHLC inverse for this sid,
# fallback to the default.
return self._default_ohlc_inverse
def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns
-------
List of DatetimeIndex representing the minutes to exclude because
of early closes.
"""
market_opens = self._market_opens.values.astype('datetime64[m]')
market_closes = self._market_closes.values.astype('datetime64[m]')
minutes_per_day = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(
minutes_per_day != self._minutes_per_day - 1)[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [(market_open, early_close)
for market_open, early_close
in zip(early_opens, early_closes)]
return minutes
@lazyval
def _minute_exclusion_tree(self):
"""
Build an interval tree keyed by the start and end of each range
        of positions that should be dropped from windows. (These are the minutes
between an early close and the minute which would be the close based
on the regular period if there were no early close.)
The value of each node is the same start and end position stored as
a tuple.
The data is stored as such in support of a fast answer to the question,
does a given start and end position overlap any of the exclusion spans?
Returns
-------
IntervalTree containing nodes which represent the minutes to exclude
because of early closes.
"""
itree = IntervalTree()
for market_open, early_close in self._minutes_to_exclude():
start_pos = self._find_position_of_minute(early_close) + 1
end_pos = (
self._find_position_of_minute(market_open)
+
self._minutes_per_day
-
1
)
data = (start_pos, end_pos)
itree[start_pos:end_pos + 1] = data
return itree
def _exclusion_indices_for_range(self, start_idx, end_idx):
"""
Returns
-------
List of tuples of (start, stop) which represent the ranges of minutes
which should be excluded when a market minute window is requested.
"""
itree = self._minute_exclusion_tree
if itree.overlaps(start_idx, end_idx):
ranges = []
intervals = itree[start_idx:end_idx]
for interval in intervals:
ranges.append(interval.data)
return sorted(ranges)
else:
return None
def _get_carray_path(self, sid, field):
sid_subdir = _sid_subdir_path(sid)
# carrays are subdirectories of the sid's rootdir
return os.path.join(self._rootdir, sid_subdir, field)
def _open_minute_file(self, field, sid):
sid = int(sid)
try:
carray = self._carrays[field][sid]
except KeyError:
carray = self._carrays[field][sid] = \
bcolz.carray(rootdir=self._get_carray_path(sid, field),
mode='r')
return carray
def table_len(self, sid):
"""Returns the length of the underlying table for this sid."""
return len(self._open_minute_file('close', sid))
def get_sid_attr(self, sid, name):
sid_subdir = _sid_subdir_path(sid)
sid_path = os.path.join(self._rootdir, sid_subdir)
attrs = bcolz.attrs.attrs(sid_path, 'r')
try:
return attrs[name]
except KeyError:
return None
def get_value(self, sid, dt, field):
"""
Retrieve the pricing info for the given sid, dt, and field.
Parameters
----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns
-------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
"""
if self._last_get_value_dt_value == dt.value:
minute_pos = self._last_get_value_dt_position
else:
try:
minute_pos = self._find_position_of_minute(dt)
except ValueError:
raise NoDataOnDate()
self._last_get_value_dt_value = dt.value
self._last_get_value_dt_position = minute_pos
        # a patch for requests of non-existing time frames,
        # due to the different candle labeling + wrong start dates
if minute_pos < 0:
return np.nan
try:
value = self._open_minute_file(field, sid)[minute_pos]
except IndexError:
value = 0
if value == 0:
if field == 'volume':
return 0
else:
return np.nan
# if field != 'volume':
value *= self._ohlc_ratio_inverse_for_sid(sid)
return value
def get_last_traded_dt(self, asset, dt):
minute_pos = self._find_last_traded_position(asset, dt)
if minute_pos == -1:
return pd.NaT
return self._pos_to_minute(minute_pos)
def _find_last_traded_position(self, asset, dt):
volumes = self._open_minute_file('volume', asset)
start_date_minute = asset.start_date.value / NANOS_IN_MINUTE
dt_minute = dt.value / NANOS_IN_MINUTE
try:
# if we know of a dt before which this asset has no volume,
# don't look before that dt
earliest_dt_to_search = self._known_zero_volume_dict[asset.sid]
except KeyError:
earliest_dt_to_search = start_date_minute
if dt_minute < earliest_dt_to_search:
return -1
pos = find_last_traded_position_internal(
self._market_open_values,
self._market_close_values,
dt_minute,
earliest_dt_to_search,
volumes,
self._minutes_per_day,
)
if pos == -1:
# if we didn't find any volume before this dt, save it to avoid
# work in the future.
try:
self._known_zero_volume_dict[asset.sid] = max(
dt_minute,
self._known_zero_volume_dict[asset.sid]
)
except KeyError:
self._known_zero_volume_dict[asset.sid] = dt_minute
return pos
def _pos_to_minute(self, pos):
minute_epoch = minute_value(
self._market_open_values,
pos,
self._minutes_per_day
)
return pd.Timestamp(minute_epoch, tz='UTC', unit="m")
def _find_position_of_minute(self, minute_dt):
"""
Internal method that returns the position of the given minute in the
list of every trading minute since market open of the first trading
day. Adjusts non market minutes to the last close.
ex. this method would return 1 for 2002-01-02 9:32 AM Eastern, if
2002-01-02 is the first trading day of the dataset.
Parameters
----------
minute_dt: pd.Timestamp
The minute whose position should be calculated.
Returns
-------
int: The position of the given minute in the list of all trading
minutes since market open on the first trading day.
"""
return find_position_of_minute(
self._market_open_values,
self._market_close_values,
minute_dt.value / NANOS_IN_MINUTE,
self._minutes_per_day,
False,
)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
"""
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
start_idx = self._find_position_of_minute(start_dt)
end_idx = self._find_position_of_minute(end_dt)
num_minutes = (end_idx - start_idx + 1)
results = []
indices_to_exclude = self._exclusion_indices_for_range(
start_idx, end_idx)
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude:
length = excl_stop - excl_start + 1
num_minutes -= length
shape = num_minutes, len(sids)
for field in fields:
if field != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.float64)
for i, sid in enumerate(sids):
carray = self._open_minute_file(field, sid)
values = carray[start_idx:end_idx + 1]
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude[::-1]:
excl_slice = np.s_[excl_start - start_idx:excl_stop
- start_idx + 1]
values = np.delete(values, excl_slice)
where = values != 0
# first slice down to len(where) because we might not have
# written data for all the minutes requested
# if field != 'volume':
out[:len(where), i][where] = (
values[where] * self._ohlc_ratio_inverse_for_sid(sid))
# else:
# out[:len(where), i][where] = values[where]
results.append(out)
return results
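def _example_read_close(rootdir, sid, minute_dt):
    """
    Illustrative sketch only: the helper name and its arguments are
    placeholders, not part of the original module. It reads a single close
    price back out of a rootdir previously produced by BcolzMinuteBarWriter.
    """
    reader = BcolzMinuteBarReader(rootdir)
    # ``minute_dt`` is expected to be a pandas Timestamp of a market minute;
    # get_value returns np.nan for OHLC fields when no trade was recorded.
    return reader.get_value(sid, minute_dt, 'close')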
class MinuteBarUpdateReader(with_metaclass(ABCMeta, object)):
"""
Abstract base class for minute update readers.
"""
@abstractmethod
def read(self, dts, sids):
"""
Read and return pricing update data.
Parameters
----------
dts : DatetimeIndex
The minutes for which to read the pricing updates.
sids : iter[int]
The sids for which to read the pricing updates.
Returns
-------
data : iter[(int, DataFrame)]
Returns an iterable of ``sid`` to the corresponding OHLCV data.
"""
raise NotImplementedError()
class H5MinuteBarUpdateWriter(object):
"""
Writer for files containing minute bar updates for consumption by a writer
for a ``MinuteBarReader`` format.
Parameters
----------
path : str
The destination path.
complevel : int, optional
The HDF5 complevel, defaults to ``5``.
complib : str, optional
The HDF5 complib, defaults to ``zlib``.
"""
FORMAT_VERSION = 0
_COMPLEVEL = 5
_COMPLIB = 'zlib'
def __init__(self, path, complevel=None, complib=None):
self._complevel = complevel if complevel \
is not None else self._COMPLEVEL
self._complib = complib if complib is not None else self._COMPLIB
self._path = path
def write(self, frames):
"""
Write the frames to the target HDF5 file, using the format used by
``pd.Panel.to_hdf``
Parameters
----------
frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
An iterable or other mapping of sid to the corresponding OHLCV
pricing data.
"""
with HDFStore(self._path, 'w',
complevel=self._complevel, complib=self._complib) \
as store:
panel = pd.Panel.from_dict(dict(frames))
panel.to_hdf(store, 'updates')
with tables.open_file(self._path, mode='r+') as h5file:
h5file.set_node_attr('/', 'version', 0)
class H5MinuteBarUpdateReader(MinuteBarUpdateReader):
"""
Reader for minute bar updates stored in HDF5 files.
Parameters
----------
path : str
The path of the HDF5 file from which to source data.
"""
def __init__(self, path):
self._panel = pd.read_hdf(path)
def read(self, dts, sids):
panel = self._panel[sids, dts, :]
return panel.iteritems()
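def _example_roundtrip_updates(path, frames):
    """
    Illustrative sketch only (names are placeholders): persist a mapping of
    sid -> OHLCV DataFrame as an HDF5 update file and open a reader on it,
    per the writer/reader docstrings above.
    """
    H5MinuteBarUpdateWriter(path).write(frames)
    return H5MinuteBarUpdateReader(path)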
| [
"[email protected]"
] | |
284fb1b51ef430201f817392977842c1cd80a739 | 49f61714a6f78d984fd2194d6064d84e891bc5b7 | /2019-1/231/users/4237/codes/1796_1613.py | 34c1d6c7aa6950bb644b8a059a48fc732ab82aae | [] | no_license | psbarros/Variaveis3 | b5c4e1517e7d94a846ee03791d25d5821a1c651c | 3dcf6f810709ce03c78335acf9533e008a2ae125 | refs/heads/master | 2023-06-13T07:05:00.878430 | 2021-07-06T17:51:37 | 2021-07-06T17:51:37 | 383,549,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | from numpy import*
vet = input("Digite a atividade fisica ai: ")
tempo = array(eval(input("E o tempo: ")))
i = 0
if(vet == 'ALONGAMENTO'):
total = 3*tempo
i = i + 1
elif(vet == 'CORRIDA'):
total | [
"[email protected]"
] | |
175287ff552e87d65033887d77371e0b868bf754 | 11ad22552bf6719214239a95d032f5559bf57dc5 | /eventregistry/QueryStory.py | c139a651db188cdf76bd89d15031f14e17ff796f | [
"MIT"
] | permissive | arunkumar6545/event-registry-python | 803914d00a1ce6dda40a1673debac75222b0614e | 2ccff7799a03f71189ed666f25f3eb673c1a8263 | refs/heads/master | 2020-09-10T15:46:57.804243 | 2019-11-16T16:57:06 | 2019-11-16T16:57:06 | 221,745,070 | 0 | 0 | MIT | 2019-11-14T16:52:00 | 2019-11-14T16:51:59 | null | UTF-8 | Python | false | false | 6,706 | py | from eventregistry.Base import *
from eventregistry.ReturnInfo import *
class QueryStory(Query):
"""
Class for obtaining available info for one or more stories (clusters) in the Event Registry
NOTE: Story in our terminology is a cluster of articles (and not a single article). An event is
then something that consists of one or more stories (typically in different languages).
@param storyUriOrList: a single story uri or a list of story uris
"""
def __init__(self, storyUriOrList = None):
super(QueryStory, self).__init__()
self._setVal("action", "getStory")
if storyUriOrList != None:
self.queryByUri(storyUriOrList)
def _getPath(self):
return "/api/v1/story"
def queryByUri(self, uriOrUriList):
"""search stories by their uri(s)"""
self._setVal("storyUri", uriOrUriList)
def setRequestedResult(self, requestStory):
"""
Set the single result type that you would like to be returned. If some other request type was previously set, it will be overwritten.
Result types can be the classes that extend RequestStory base class (see classes below).
"""
assert isinstance(requestStory, RequestStory), "QueryStory class can only accept result requests that are of type RequestStory"
self.resultTypeList = [requestStory]
class RequestStory:
def __init__(self):
self.resultType = None
def getResultType(self):
return self.resultType
class RequestStoryInfo(RequestStory):
"""
return details about a story
"""
def __init__(self, returnInfo = ReturnInfo()):
self.resultType = "info"
self.__dict__.update(returnInfo.getParams("info"))
class RequestStoryArticles(RequestStory):
"""
return articles about the story
"""
def __init__(self,
page = 1,
count = 100,
sortBy = "cosSim", sortByAsc = False,
returnInfo = ReturnInfo(articleInfo = ArticleInfoFlags(bodyLen = 200))):
"""
return articles in the story (cluster)
@param page: page of the articles to return (1, 2, ...)
@param count: number of articles to return per page (at most 100)
@param sortBy: order in which articles are sorted. Options: id (internal id), date (published date), cosSim (closeness to event centroid), sourceImportanceRank (importance of the news source, custom set), sourceAlexaGlobalRank (global rank of the news source), sourceAlexaCountryRank (country rank of the news source), socialScore (total shares in social media)
@param sortByAsc: should the articles be sorted in ascending order (True) or descending (False) based on sortBy value
@param returnInfo: what details should be included in the returned information
"""
assert page >= 1, "page has to be >= 1"
assert count <= 100
self.resultType = "articles"
self.articlesPage = page
self.articlesCount = count
self.articlesSortBy = sortBy
self.articlesSortByAsc = sortByAsc
self.__dict__.update(returnInfo.getParams("articles"))
class RequestStoryArticleUris(RequestStory):
"""
return a list of article uris
"""
def __init__(self,
sortBy = "cosSim", sortByAsc = False # order in which story articles are sorted. Options: id (internal id), date (published date), cosSim (closeness to story centroid), socialScore (total shares in social media), facebookShares (shares on fb), twitterShares (shares on twitter)
):
"""
return articles in the story (cluster)
@param sortBy: order in which articles are sorted. Options: id (internal id), date (published date), cosSim (closeness to event centroid), sourceImportanceRank (importance of the news source, custom set), sourceAlexaGlobalRank (global rank of the news source), sourceAlexaCountryRank (country rank of the news source), socialScore (total shares in social media)
@param sortByAsc: should the articles be sorted in ascending order (True) or descending (False) based on sortBy value
"""
self.articleUrisSortBy = sortBy
self.articleUrisSortByAsc = sortByAsc
self.resultType = "articleUris"
class RequestStoryArticleTrend(RequestStory):
"""
return trending information for the articles about the story
"""
def __init__(self,
lang = mainLangs,
minArticleCosSim = -1,
returnInfo = ReturnInfo(articleInfo = ArticleInfoFlags(bodyLen = 0))):
self.resultType = "articleTrend"
self.articleTrendLang = lang
self.articleTrendMinArticleCosSim = minArticleCosSim
self.__dict__.update(returnInfo.getParams("articleTrend"))
class RequestStorySimilarStories(RequestStory):
"""
compute and return a list of similar stories
@param conceptInfoList: array of concepts and their importance, e.g. [{ "uri": "http://en.wikipedia.org/wiki/Barack_Obama", "wgt": 100 }, ...]
@param count: number of similar stories to return (at most 50)
@param dateStart: what can be the oldest date of the similar stories
@param dateEnd: what can be the newest date of the similar stories
@param addArticleTrendInfo: for the returned stories compute how they were trending (intensity of reporting) in different time periods
@param aggrHours: time span that is used as a unit when computing the trending info
@param returnInfo: what details should be included in the returned information
"""
def __init__(self,
conceptInfoList,
count=50, # number of similar stories to return
dateStart = None, # what can be the oldest date of the similar stories
dateEnd = None, # what can be the newest date of the similar stories
lang = [],
returnInfo = ReturnInfo()):
assert count <= 50
assert isinstance(conceptInfoList, list)
self.action = "getSimilarStories"
self.concepts = json.dumps(conceptInfoList)
self.storiesCount = count
if dateStart != None:
self.dateStart = QueryParamsBase.encodeDate(dateStart)
if dateEnd != None:
self.dateEnd = QueryParamsBase.encodeDate(dateEnd)
if len(lang) > 0:
self.lang = lang
# setting resultType since we have to, but it's actually ignored on the backend
self.resultType = "similarStories"
self.__dict__.update(returnInfo.getParams("similarStories"))
| [
"[email protected]"
] | |
3fcb6194f404ce704465066f56f9f62584820530 | 7a73fef9ae426c48573bae41447cef7cb2b97bf6 | /dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/events/__init__.py | 1be0370374c0659b08731093f6f1d86cacca7bb6 | [
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | mjames-upc/python-awips | 7f0a80a04457224c9e195b82a95eef4d9b2b3091 | e2b05f5587b02761df3b6dd5c6ee1f196bd5f11c | refs/heads/master | 2020-03-31T03:00:49.540816 | 2018-10-05T23:15:42 | 2018-10-05T23:15:42 | 53,707,817 | 0 | 0 | null | 2017-04-12T18:00:59 | 2016-03-12T01:46:57 | Python | UTF-8 | Python | false | false | 101 | py | ##
##
# File auto-generated by PythonFileGenerator
__all__ = [
'hazards'
]
| [
"[email protected]"
] | |
f5c72b1f3b8cbbe42360e74a8ca4056e885d0bab | ec760cb774a45a12d40529036b7fca3dd589223c | /services/TS29222_CAPIF_Routing_Info_API/capif_routing_info/models/ipv4_address_range.py | 8bf453284830c0d9696da8ccd2b8deea4ae98178 | [
"Apache-2.0"
] | permissive | EVOLVED-5G/CAPIF_API_Services | e4d7f8c7fc9a69aa364787471c5bd54d51fd1cb8 | c907c68d54adf3e3ad7be15ac6707b8c64a1b778 | refs/heads/develop | 2023-07-29T09:31:23.176795 | 2023-05-31T12:56:33 | 2023-05-31T12:56:33 | 416,657,882 | 15 | 5 | Apache-2.0 | 2023-09-04T12:01:57 | 2021-10-13T08:46:52 | Python | UTF-8 | Python | false | false | 3,479 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from capif_routing_info.models.base_model_ import Model
import re
from capif_routing_info import util
import re # noqa: E501
class Ipv4AddressRange(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, start=None, end=None): # noqa: E501
"""Ipv4AddressRange - a model defined in OpenAPI
:param start: The start of this Ipv4AddressRange. # noqa: E501
:type start: str
:param end: The end of this Ipv4AddressRange. # noqa: E501
:type end: str
"""
self.openapi_types = {
'start': str,
'end': str
}
self.attribute_map = {
'start': 'start',
'end': 'end'
}
self._start = start
self._end = end
@classmethod
def from_dict(cls, dikt) -> 'Ipv4AddressRange':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Ipv4AddressRange of this Ipv4AddressRange. # noqa: E501
:rtype: Ipv4AddressRange
"""
return util.deserialize_model(dikt, cls)
@property
def start(self):
"""Gets the start of this Ipv4AddressRange.
String identifying a IPv4 address formatted in the \"dotted decimal\" notation as defined in RFC 1166. # noqa: E501
:return: The start of this Ipv4AddressRange.
:rtype: str
"""
return self._start
@start.setter
def start(self, start):
"""Sets the start of this Ipv4AddressRange.
String identifying a IPv4 address formatted in the \"dotted decimal\" notation as defined in RFC 1166. # noqa: E501
:param start: The start of this Ipv4AddressRange.
:type start: str
"""
if start is not None and not re.search(r'^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$', start): # noqa: E501
raise ValueError("Invalid value for `start`, must be a follow pattern or equal to `/^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$/`") # noqa: E501
self._start = start
@property
def end(self):
"""Gets the end of this Ipv4AddressRange.
String identifying a IPv4 address formatted in the \"dotted decimal\" notation as defined in RFC 1166. # noqa: E501
:return: The end of this Ipv4AddressRange.
:rtype: str
"""
return self._end
@end.setter
def end(self, end):
"""Sets the end of this Ipv4AddressRange.
String identifying a IPv4 address formatted in the \"dotted decimal\" notation as defined in RFC 1166. # noqa: E501
:param end: The end of this Ipv4AddressRange.
:type end: str
"""
if end is not None and not re.search(r'^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$', end): # noqa: E501
raise ValueError("Invalid value for `end`, must be a follow pattern or equal to `/^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$/`") # noqa: E501
self._end = end
| [
"[email protected]"
] | |
0a23df6de654d24c8cfd4468b5bb389cc25b8f20 | 3a93a50bf80668a6ede701534f1567c3653729b0 | /Full-time-interview-preparation/Graph/redundant_connection.py | 8b8b2bb52cc5b8bd3f503e7f63d43db58f4c181e | [] | no_license | Tadele01/Competitive-Programming | c16778298b6c1b4c0b579aedd1b5f0d4106aceeb | 125de2b4e23f78d2e9f0a8fde90463bed0aed70f | refs/heads/master | 2023-09-01T06:00:09.068940 | 2021-09-13T18:04:30 | 2021-09-13T18:04:30 | 325,728,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | from typing import List
class Solution:
def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:
self.parents = [-1 for _ in range(len(edges)+1)]
for u,v in edges:
if not self.union(u,v):
return [u,v]
def find(self, x):
while self.parents[x] > 0:
x = self.parents[x]
return x
def union(self, x, y):
x_parent, y_parent = self.find(x), self.find(y)
if x_parent == y_parent:
return False
else:
self.parents[x_parent] = y_parent
return True
| [
"[email protected]"
] | |
d22ab5a62f8a9bbd77dc270f6a368adcf4a6a639 | 9c16d6b984c9a22c219bd2a20a02db21a51ba8d7 | /chrome/test/media_router/media_router_tests.gypi | c1211c49edbec838fba1408930357c7773b8918d | [
"BSD-3-Clause"
] | permissive | nv-chromium/chromium-crosswalk | fc6cc201cb1d6a23d5f52ffd3a553c39acd59fa7 | b21ec2ffe3a13b6a8283a002079ee63b60e1dbc5 | refs/heads/nv-crosswalk-17 | 2022-08-25T01:23:53.343546 | 2019-01-16T21:35:23 | 2019-01-16T21:35:23 | 63,197,891 | 0 | 0 | NOASSERTION | 2019-01-16T21:38:06 | 2016-07-12T22:58:43 | null | UTF-8 | Python | false | false | 1,115 | gypi | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'media_router_integration_test_resources': [
'resources/basic_test.html',
'resources/common.js',
'resources/fail_create_route.html',
'resources/fail_create_route.json',
'resources/fail_join_session.html',
'resources/fail_join_session.json',
'resources/no_provider.html',
'resources/no_provider.json',
],
}, # end of variables
'targets': [
{
'target_name': 'media_router_integration_test_files',
'type': 'none',
'variables': {
'output_dir': '<(PRODUCT_DIR)/media_router/browser_test_resources',
'resource_files': [
'<@(media_router_integration_test_resources)',
]
},
'copies': [
{
'destination': '<(output_dir)',
'files': [
'<@(resource_files)',
],
},
],
}, # end of target 'media_router_integration_test_files'
], # end of targets
}
| [
"[email protected]"
] | |
c49e277824f5d81797c03152796abe6a7b4fb545 | d4cd7da93ef93b32ae30c6c96b0612ffca758c0b | /0x0F-python-object_relational_mapping/5-filter_cities.py | e1edee838b931fa9c1737f3e78038bf11adce3a0 | [] | no_license | jfangwang/holbertonschool-higher_level_programming | afde26b71104b1a0ecb6cb1c99736a5286a51f08 | 32f7396181fac7c7495def24af72346d6ba07249 | refs/heads/master | 2023-04-24T17:16:38.731772 | 2021-05-06T15:49:39 | 2021-05-06T15:49:39 | 319,357,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | #!/usr/bin/python3
""" Filter states """
if __name__ == "__main__":
import MySQLdb
import sys
argv = sys.argv
state_id = 0
index = 0
if len(argv) != 5:
        print("USAGE: ./5-filter_cities.py username password\
database_name state_name")
exit()
try:
db = MySQLdb.connect(host="localhost", user=argv[1], charset="utf8",
passwd=argv[2], db=argv[3], port=3306)
    except MySQLdb.Error as err:
        print(err)
        exit()
    # Guard against SQL injection: keep only the leading alphabetic part
    # of the state name before it is interpolated into the query below.
while argv[4][index].isalpha() and index < len(argv[4]) - 1:
index += 1
if argv[4][index].isalpha():
index += 1
argv[4] = argv[4][slice(index)]
cur = db.cursor()
cur.execute("SELECT cities.name FROM cities WHERE cities.state_id\
IN (SELECT states.id FROM states\
WHERE states.name = '{}')\
ORDER BY id ASC".format(argv[4]))
rows = cur.fetchall()
if len(rows) > 0:
for a in range(0, len(rows)):
if len(rows) - 1 > a:
print(rows[a][0], end=', ')
print(rows[a][0])
else:
print()
cur.close()
db.close()
| [
"[email protected]"
] | |
4855001625f98e9d580537235844adc6d370c7db | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2500/60621/258426.py | 9373b12868d7d20dccd2542fe5e09e61dd730079 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | a=list(eval(input()))
b=[x for x in a]
c=[i+1 for i in range(len(a))]
d=[]
for i in range(len(a)):
if a==c:
break
index=a.index(len(a)-i)
if index==len(a)-i-1:
continue
else:
if index+1!=1:
d.append(index+1)
temp=[x for x in a[0:index+1]]
temp.reverse()
a[0:index+1]=temp
d.append(len(a)-i)
temp=[x for x in a[0:len(a)-i]]
temp.reverse()
a[0:len(a)-i]=temp
print(d) | [
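# Note: the code above performs a pancake sort on a permutation of 1..n read
# from standard input, printing the sizes of the prefix flips it used to sort
# the list into ascending order (trivial flips of size 1 are omitted).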
"[email protected]"
] | |
ee6d90c3bc03a44f8fa4910336872a00a09153e6 | 9519f459f8622ce209ba61f601df6b567b321f1a | /metrics/migrations/0001_initial.py | 910358f99305fbaf34be89da0e5ba842ae1dc1c1 | [
"MIT"
] | permissive | compsoc-ssc/compsocssc | 3b478b2a9542ac62e0b3a0c8e13b84f289a5eb40 | b61d490077b6ddf4798ce9ac30ca60bc63923080 | refs/heads/master | 2020-04-05T22:57:38.698446 | 2017-08-04T09:25:21 | 2017-08-04T09:25:21 | 30,790,913 | 7 | 8 | MIT | 2017-12-29T10:43:21 | 2015-02-14T08:05:48 | Python | UTF-8 | Python | false | false | 673 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Hit',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ip', models.GenericIPAddressField()),
('ua', models.CharField(max_length=200)),
('stamp', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
]
| [
"[email protected]"
] | |
c0a983c0bc0a8a575f4982c5d5e5940b4571ca58 | 28dff466783a11266dd6b6cde9edd36de5c1ae4b | /python/ccxt/async_support/bybit.py | 4dbf295992bb2588651d79c8fadb8d6fb2c0e61e | [
"MIT"
] | permissive | dilongfa/ccxt | 24cd99ab73c3f25b472b7bc9e954b88e8ba19502 | ab5cff84948c3828ff7a234c67b8726c0353e5f6 | refs/heads/master | 2022-05-18T07:07:17.517717 | 2022-04-07T12:46:47 | 2022-04-07T12:46:47 | 98,248,595 | 0 | 0 | MIT | 2019-05-10T08:47:35 | 2017-07-25T01:04:18 | JavaScript | UTF-8 | Python | false | false | 141,317 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bybit(Exchange):
def describe(self):
return self.deep_extend(super(bybit, self).describe(), {
'id': 'bybit',
'name': 'Bybit',
'countries': ['VG'], # British Virgin Islands
'version': 'v2',
'userAgent': None,
# 50 requests per second for GET requests, 1000ms / 50 = 20ms between requests
# 20 requests per second for POST requests, cost = 50 / 20 = 2.5
'rateLimit': 20,
'hostname': 'bybit.com', # bybit.com, bytick.com
'has': {
'CORS': True,
'spot': True,
'margin': False,
'swap': True,
'future': True,
'option': None,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRates': False,
'fetchClosedOrders': True,
'fetchDeposits': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': False,
'fetchIndexOHLCV': True,
'fetchLedger': True,
'fetchMarketLeverageTiers': True,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchTransactions': None,
'fetchWithdrawals': True,
'setLeverage': True,
'setMarginMode': True,
},
'timeframes': {
'1m': '1',
'3m': '3',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'4h': '240',
'6h': '360',
'12h': '720',
'1d': 'D',
'1w': 'W',
'1M': 'M',
'1y': 'Y',
},
'urls': {
'test': {
'spot': 'https://api-testnet.{hostname}',
'futures': 'https://api-testnet.{hostname}',
'v2': 'https://api-testnet.{hostname}',
'public': 'https://api-testnet.{hostname}',
'private': 'https://api-testnet.{hostname}',
},
'logo': 'https://user-images.githubusercontent.com/51840849/76547799-daff5b80-649e-11ea-87fb-3be9bac08954.jpg',
'api': {
'spot': 'https://api.{hostname}',
'futures': 'https://api.{hostname}',
'v2': 'https://api.{hostname}',
'public': 'https://api.{hostname}',
'private': 'https://api.{hostname}',
},
'www': 'https://www.bybit.com',
'doc': [
'https://bybit-exchange.github.io/docs/inverse/',
'https://bybit-exchange.github.io/docs/linear/',
'https://github.com/bybit-exchange',
],
'fees': 'https://help.bybit.com/hc/en-us/articles/360039261154',
'referral': 'https://www.bybit.com/app/register?ref=X7Prm',
},
'api': {
# outdated endpoints -----------------------------------------
'spot': {
'public': {
'get': [
'symbols',
],
},
'quote': {
'get': [
'depth',
'depth/merged',
'trades',
'kline',
'ticker/24hr',
'ticker/price',
'ticker/book_ticker',
],
},
'private': {
'get': [
'order',
'open-orders',
'history-orders',
'myTrades',
'account',
'time',
],
'post': [
'order',
],
'delete': [
'order',
'order/fast',
],
},
'order': {
'delete': [
'batch-cancel',
'batch-fast-cancel',
'batch-cancel-by-ids',
],
},
},
'futures': {
'private': {
'get': [
'order/list',
'order',
'stop-order/list',
'stop-order',
'position/list',
'execution/list',
'trade/closed-pnl/list',
],
'post': [
'order/create',
'order/cancel',
'order/cancelAll',
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancelAll',
'stop-order/replace',
'position/change-position-margin',
'position/trading-stop',
'position/leverage/save',
'position/switch-mode',
'position/switch-isolated',
'position/risk-limit',
],
},
},
'v2': {
'public': {
'get': [
'orderBook/L2',
'kline/list',
'tickers',
'trading-records',
'symbols',
'liq-records',
'mark-price-kline',
'index-price-kline',
'premium-index-kline',
'open-interest',
'big-deal',
'account-ratio',
'time',
'announcement',
'funding/prev-funding-rate',
'risk-limit/list',
],
},
'private': {
'get': [
'order/list',
'order',
'stop-order/list',
'stop-order',
'position/list',
'position/fee-rate',
'execution/list',
'trade/closed-pnl/list',
'funding/prev-funding-rate',
'funding/prev-funding',
'funding/predicted-funding',
'account/api-key',
'account/lcp',
'wallet/balance',
'wallet/fund/records',
'wallet/withdraw/list',
'exchange-order/list',
],
'post': [
'order/create',
'order/cancel',
'order/cancelAll',
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancelAll',
'stop-order/replace',
'position/change-position-margin',
'position/trading-stop',
'position/leverage/save',
'position/switch-mode',
'position/switch-isolated',
'position/risk-limit',
],
},
},
# new endpoints ------------------------------------------
'public': {
'get': {
# inverse swap
'v2/public/orderBook/L2': 1,
'v2/public/kline/list': 3,
'v2/public/tickers': 1,
'v2/public/trading-records': 1,
'v2/public/symbols': 1,
'v2/public/mark-price-kline': 3,
'v2/public/index-price-kline': 3,
'v2/public/premium-index-kline': 2,
'v2/public/open-interest': 1,
'v2/public/big-deal': 1,
'v2/public/account-ratio': 1,
'v2/public/funding-rate': 1,
'v2/public/elite-ratio': 1,
# linear swap USDT
'public/linear/kline': 3,
'public/linear/recent-trading-records': 1,
'public/linear/funding/prev-funding-rate': 1,
'public/linear/mark-price-kline': 1,
'public/linear/index-price-kline': 1,
'public/linear/premium-index-kline': 1,
# spot
'spot/v1/time': 1,
'spot/v1/symbols': 1,
'spot/quote/v1/depth': 1,
'spot/quote/v1/depth/merged': 1,
'spot/quote/v1/trades': 1,
'spot/quote/v1/kline': 1,
'spot/quote/v1/ticker/24hr': 1,
'spot/quote/v1/ticker/price': 1,
'spot/quote/v1/ticker/book_ticker': 1,
# data
'v2/public/time': 1,
'v2/public/announcement': 1,
# USDC endpoints are testnet only as of 2022 Jan 11 ----------
# option USDC(testnet only)
'option/usdc/openapi/public/v1/order-book': 1,
'option/usdc/openapi/public/v1/symbols': 1,
'option/usdc/openapi/public/v1/tick': 1,
'option/usdc/openapi/public/v1/delivery-price': 1,
'option/usdc/openapi/public/v1/query-trade-latest': 1,
# perpetual swap USDC(testnet only)
'perpetual/usdc/openapi/public/v1/order-book': 1,
'perpetual/usdc/openapi/public/v1/symbols': 1,
'perpetual/usdc/openapi/public/v1/tick': 1,
'perpetual/usdc/openapi/public/v1/kline/list': 1,
'perpetual/usdc/openapi/public/v1/mark-price-kline': 1,
'perpetual/usdc/openapi/public/v1/index-price-kline': 1,
'perpetual/usdc/openapi/public/v1/premium-index-kline': 1,
'perpetual/usdc/openapi/public/v1/open-interest': 1,
'perpetual/usdc/openapi/public/v1/big-deal': 1,
'perpetual/usdc/openapi/public/v1/account-ratio': 1,
},
# outdated endpoints--------------------------------------
'linear': {
'get': [
'kline',
'recent-trading-records',
'funding/prev-funding-rate',
'mark-price-kline',
'index-price-kline',
'premium-index-kline',
'risk-limit',
],
},
},
# new endpoints ------------------------------------------
'private': {
'get': {
# inverse swap
'v2/private/order/list': 5,
'v2/private/order': 5,
'v2/private/stop-order/list': 5,
'v2/private/stop-order': 1,
'v2/private/position/list': 25,
'v2/private/position/fee-rate': 40,
'v2/private/execution/list': 25,
'v2/private/trade/closed-pnl/list': 1,
'v2/public/risk-limit/list': 1, # TODO check
'v2/public/funding/prev-funding-rate': 25, # TODO check
'v2/private/funding/prev-funding': 25,
'v2/private/funding/predicted-funding': 25,
'v2/private/account/api-key': 5,
'v2/private/account/lcp': 1,
'v2/private/wallet/balance': 25, # 120 per minute = 2 per second => cost = 50 / 2 = 25
'v2/private/wallet/fund/records': 25,
'v2/private/wallet/withdraw/list': 25,
'v2/private/exchange-order/list': 1,
# linear swap USDT
'private/linear/order/list': 5, # 600 per minute = 10 per second => cost = 50 / 10 = 5
'private/linear/order/search': 5,
'private/linear/stop-order/list': 5,
'private/linear/stop-order/search': 5,
'private/linear/position/list': 25,
'private/linear/trade/execution/list': 25,
'private/linear/trade/closed-pnl/list': 25,
'public/linear/risk-limit': 1,
'private/linear/funding/predicted-funding': 25,
'private/linear/funding/prev-funding': 25,
# inverse futures
'futures/private/order/list': 5,
'futures/private/order': 5,
'futures/private/stop-order/list': 5,
'futures/private/stop-order': 5,
'futures/private/position/list': 25,
'futures/private/execution/list': 25,
'futures/private/trade/closed-pnl/list': 1,
# spot
'spot/v1/account': 2.5,
'spot/v1/order': 2.5,
'spot/v1/open-orders': 2.5,
'spot/v1/history-orders': 2.5,
'spot/v1/myTrades': 2.5,
# account
'asset/v1/private/transfer/list': 50, # 60 per minute = 1 per second => cost = 50 / 1 = 50
'asset/v1/private/sub-member/transfer/list': 50,
'asset/v1/private/sub-member/member-ids': 50,
},
'post': {
# inverse swap
'v2/private/order/create': 30,
'v2/private/order/cancel': 30,
'v2/private/order/cancelAll': 300, # 100 per minute + 'consumes 10 requests'
'v2/private/order/replace': 30,
'v2/private/stop-order/create': 30,
'v2/private/stop-order/cancel': 30,
'v2/private/stop-order/cancelAll': 300,
'v2/private/stop-order/replace': 30,
'v2/private/position/change-position-margin': 40,
'v2/private/position/trading-stop': 40,
'v2/private/position/leverage/save': 40,
'v2/private/tpsl/switch-mode': 40,
'v2/private/position/switch-isolated': 2.5,
'v2/private/position/risk-limit': 2.5,
'v2/private/position/switch-mode': 2.5,
# linear swap USDT
'private/linear/order/create': 30, # 100 per minute = 1.666 per second => cost = 50 / 1.6666 = 30
'private/linear/order/cancel': 30,
'private/linear/order/cancel-all': 300, # 100 per minute + 'consumes 10 requests'
'private/linear/order/replace': 30,
'private/linear/stop-order/create': 30,
'private/linear/stop-order/cancel': 30,
'private/linear/stop-order/cancel-all': 300,
'private/linear/stop-order/replace': 30,
'private/linear/position/set-auto-add-margin': 40,
'private/linear/position/switch-isolated': 40,
'private/linear/position/switch-mode': 40,
'private/linear/tpsl/switch-mode': 2.5,
'private/linear/position/add-margin': 40,
'private/linear/position/set-leverage': 40, # 75 per minute = 1.25 per second => cost = 50 / 1.25 = 40
'private/linear/position/trading-stop': 40,
'private/linear/position/set-risk': 2.5,
# inverse futures
'futures/private/order/create': 30,
'futures/private/order/cancel': 30,
'futures/private/order/cancelAll': 30,
'futures/private/order/replace': 30,
'futures/private/stop-order/create': 30,
'futures/private/stop-order/cancel': 30,
'futures/private/stop-order/cancelAll': 30,
'futures/private/stop-order/replace': 30,
'futures/private/position/change-position-margin': 40,
'futures/private/position/trading-stop': 40,
'futures/private/position/leverage/save': 40,
'futures/private/position/switch-mode': 40,
'futures/private/tpsl/switch-mode': 40,
'futures/private/position/switch-isolated': 40,
'futures/private/position/risk-limit': 2.5,
# spot
'spot/v1/order': 2.5,
# account
'asset/v1/private/transfer': 150, # 20 per minute = 0.333 per second => cost = 50 / 0.3333 = 150
'asset/v1/private/sub-member/transfer': 150,
# USDC endpoints are testnet only as of 2022 Jan 11 ----------
# option USDC(testnet only)
'option/usdc/openapi/private/v1/place-order': 2.5,
'option/usdc/openapi/private/v1/batch-place-order': 2.5,
'option/usdc/openapi/private/v1/replace-order': 2.5,
'option/usdc/openapi/private/v1/batch-replace-orders': 2.5,
'option/usdc/openapi/private/v1/cancel-order': 2.5,
'option/usdc/openapi/private/v1/batch-cancel-orders': 2.5,
'option/usdc/openapi/private/v1/cancel-all': 2.5,
'option/usdc/openapi/private/v1/query-active-orders': 2.5,
'option/usdc/openapi/private/v1/query-order-history': 2.5,
'option/usdc/openapi/private/v1/execution-list': 2.5,
'option/usdc/openapi/private/v1/query-transaction-log': 2.5,
'option/usdc/openapi/private/v1/query-wallet-balance': 2.5,
'option/usdc/openapi/private/v1/query-asset-info': 2.5,
'option/usdc/openapi/private/v1/query-margin-info': 2.5,
'option/usdc/openapi/private/v1/query-position': 2.5,
'option/usdc/openapi/private/v1/query-delivery-list': 2.5,
'option/usdc/openapi/private/v1/query-position-exp-date': 2.5,
'option/usdc/openapi/private/v1/mmp-modify': 2.5,
'option/usdc/openapi/private/v1/mmp-reset': 2.5,
# perpetual swap USDC(testnet only)
'perpetual/usdc/openapi/private/v1/place-order': 2.5,
'perpetual/usdc/openapi/private/v1/replace-order': 2.5,
'perpetual/usdc/openapi/private/v1/cancel-order': 2.5,
'perpetual/usdc/openapi/private/v1/cancel-all': 2.5,
'perpetual/usdc/openapi/private/v1/position/leverage/save': 2.5,
'option/usdc/openapi/private/v1/session-settlement': 2.5,
'perpetual/usdc/openapi/public/v1/risk-limit/list': 2.5,
'perpetual/usdc/openapi/private/v1/position/set-risk-limit': 2.5,
},
'delete': {
# spot
'spot/v1/order': 2.5,
'spot/v1/order/fast': 2.5,
'spot/order/batch-cancel': 2.5,
'spot/order/batch-fast-cancel': 2.5,
'spot/order/batch-cancel-by-ids': 2.5,
},
# outdated endpoints -------------------------------------
'linear': {
'get': [
'order/list',
'order/search',
'stop-order/list',
'stop-order/search',
'position/list',
'trade/execution/list',
'trade/closed-pnl/list',
'funding/predicted-funding',
'funding/prev-funding',
],
'post': [
'order/create',
'order/cancel',
'order/cancel-all',
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancel-all',
'stop-order/replace',
'position/set-auto-add-margin',
'position/switch-isolated',
'position/switch-mode',
'tpsl/switch-mode',
'position/add-margin',
'position/set-leverage',
'position/trading-stop',
'position/set-risk',
],
},
},
},
'httpExceptions': {
'403': RateLimitExceeded, # Forbidden -- You request too many times
},
'exceptions': {
'exact': {
'-2015': AuthenticationError, # Invalid API-key, IP, or permissions for action.
'10001': BadRequest, # parameter error
'10002': InvalidNonce, # request expired, check your timestamp and recv_window
'10003': AuthenticationError, # Invalid apikey
'10004': AuthenticationError, # invalid sign
'10005': PermissionDenied, # permission denied for current apikey
'10006': RateLimitExceeded, # too many requests
'10007': AuthenticationError, # api_key not found in your request parameters
'10010': PermissionDenied, # request ip mismatch
'10017': BadRequest, # request path not found or request method is invalid
'10018': RateLimitExceeded, # exceed ip rate limit
'20001': OrderNotFound, # Order not exists
'20003': InvalidOrder, # missing parameter side
'20004': InvalidOrder, # invalid parameter side
'20005': InvalidOrder, # missing parameter symbol
'20006': InvalidOrder, # invalid parameter symbol
'20007': InvalidOrder, # missing parameter order_type
'20008': InvalidOrder, # invalid parameter order_type
'20009': InvalidOrder, # missing parameter qty
'20010': InvalidOrder, # qty must be greater than 0
'20011': InvalidOrder, # qty must be an integer
'20012': InvalidOrder, # qty must be greater than zero and less than 1 million
'20013': InvalidOrder, # missing parameter price
'20014': InvalidOrder, # price must be greater than 0
'20015': InvalidOrder, # missing parameter time_in_force
'20016': InvalidOrder, # invalid value for parameter time_in_force
'20017': InvalidOrder, # missing parameter order_id
'20018': InvalidOrder, # invalid date format
'20019': InvalidOrder, # missing parameter stop_px
'20020': InvalidOrder, # missing parameter base_price
'20021': InvalidOrder, # missing parameter stop_order_id
'20022': BadRequest, # missing parameter leverage
'20023': BadRequest, # leverage must be a number
'20031': BadRequest, # leverage must be greater than zero
'20070': BadRequest, # missing parameter margin
'20071': BadRequest, # margin must be greater than zero
'20084': BadRequest, # order_id or order_link_id is required
'30001': BadRequest, # order_link_id is repeated
'30003': InvalidOrder, # qty must be more than the minimum allowed
'30004': InvalidOrder, # qty must be less than the maximum allowed
'30005': InvalidOrder, # price exceeds maximum allowed
'30007': InvalidOrder, # price exceeds minimum allowed
'30008': InvalidOrder, # invalid order_type
'30009': ExchangeError, # no position found
'30010': InsufficientFunds, # insufficient wallet balance
'30011': PermissionDenied, # operation not allowed as position is undergoing liquidation
'30012': PermissionDenied, # operation not allowed as position is undergoing ADL
'30013': PermissionDenied, # position is in liq or adl status
'30014': InvalidOrder, # invalid closing order, qty should not be greater than size
'30015': InvalidOrder, # invalid closing order, side should be opposite
'30016': ExchangeError, # TS and SL must be cancelled first while closing position
'30017': InvalidOrder, # estimated fill price cannot be lower than current Buy liq_price
'30018': InvalidOrder, # estimated fill price cannot be higher than current Sell liq_price
'30019': InvalidOrder, # cannot attach TP/SL params for non-zero position when placing non-opening position order
'30020': InvalidOrder, # position already has TP/SL params
'30021': InvalidOrder, # cannot afford estimated position_margin
'30022': InvalidOrder, # estimated buy liq_price cannot be higher than current mark_price
'30023': InvalidOrder, # estimated sell liq_price cannot be lower than current mark_price
'30024': InvalidOrder, # cannot set TP/SL/TS for zero-position
'30025': InvalidOrder, # trigger price should be bigger than 10% of last price
'30026': InvalidOrder, # price too high
'30027': InvalidOrder, # price set for Take profit should be higher than Last Traded Price
'30028': InvalidOrder, # price set for Stop loss should be between Liquidation price and Last Traded Price
'30029': InvalidOrder, # price set for Stop loss should be between Last Traded Price and Liquidation price
'30030': InvalidOrder, # price set for Take profit should be lower than Last Traded Price
'30031': InsufficientFunds, # insufficient available balance for order cost
'30032': InvalidOrder, # order has been filled or cancelled
'30033': RateLimitExceeded, # The number of stop orders exceeds maximum limit allowed
'30034': OrderNotFound, # no order found
'30035': RateLimitExceeded, # too fast to cancel
'30036': ExchangeError, # the expected position value after order execution exceeds the current risk limit
'30037': InvalidOrder, # order already cancelled
'30041': ExchangeError, # no position found
'30042': InsufficientFunds, # insufficient wallet balance
'30043': InvalidOrder, # operation not allowed as position is undergoing liquidation
'30044': InvalidOrder, # operation not allowed as position is undergoing ADL
'30045': InvalidOrder, # operation not allowed as position is not normal status
'30049': InsufficientFunds, # insufficient available balance
'30050': ExchangeError, # any adjustments made will trigger immediate liquidation
'30051': ExchangeError, # due to risk limit, cannot adjust leverage
'30052': ExchangeError, # leverage cannot be less than 1
'30054': ExchangeError, # position margin is invalid
'30057': ExchangeError, # requested quantity of contracts exceeds risk limit
'30063': ExchangeError, # reduce-only rule not satisfied
'30067': InsufficientFunds, # insufficient available balance
'30068': ExchangeError, # exit value must be positive
'30074': InvalidOrder, # can't create the stop order, because you expect the order will be triggered when the LastPrice(or IndexPrice, MarkPrice, determined by trigger_by) is rising to stop_px, but the LastPrice(or IndexPrice, MarkPrice) is already equal to or greater than stop_px, please adjust base_price or stop_px
'30075': InvalidOrder, # can't create the stop order, because you expect the order will be triggered when the LastPrice(or IndexPrice, MarkPrice, determined by trigger_by) is falling to stop_px, but the LastPrice(or IndexPrice, MarkPrice) is already equal to or less than stop_px, please adjust base_price or stop_px
'30078': ExchangeError, # {"ret_code":30078,"ret_msg":"","ext_code":"","ext_info":"","result":null,"time_now":"1644853040.916000","rate_limit_status":73,"rate_limit_reset_ms":1644853040912,"rate_limit":75}
# '30084': BadRequest, # Isolated not modified, see handleErrors below
'33004': AuthenticationError, # apikey already expired
'34026': ExchangeError, # the limit is no change
'130021': InsufficientFunds, # {"ret_code":130021,"ret_msg":"orderfix price failed for CannotAffordOrderCost.","ext_code":"","ext_info":"","result":null,"time_now":"1644588250.204878","rate_limit_status":98,"rate_limit_reset_ms":1644588250200,"rate_limit":100}
},
'broad': {
'unknown orderInfo': OrderNotFound, # {"ret_code":-1,"ret_msg":"unknown orderInfo","ext_code":"","ext_info":"","result":null,"time_now":"1584030414.005545","rate_limit_status":99,"rate_limit_reset_ms":1584030414003,"rate_limit":100}
'invalid api_key': AuthenticationError, # {"ret_code":10003,"ret_msg":"invalid api_key","ext_code":"","ext_info":"","result":null,"time_now":"1599547085.415797"}
},
},
'precisionMode': TICK_SIZE,
'options': {
'marketTypes': {
'BTC/USDT': 'linear',
'ETH/USDT': 'linear',
'BNB/USDT': 'linear',
'ADA/USDT': 'linear',
'DOGE/USDT': 'linear',
'XRP/USDT': 'linear',
'DOT/USDT': 'linear',
'UNI/USDT': 'linear',
'BCH/USDT': 'linear',
'LTC/USDT': 'linear',
'SOL/USDT': 'linear',
'LINK/USDT': 'linear',
'MATIC/USDT': 'linear',
'ETC/USDT': 'linear',
'FIL/USDT': 'linear',
'EOS/USDT': 'linear',
'AAVE/USDT': 'linear',
'XTZ/USDT': 'linear',
'SUSHI/USDT': 'linear',
'XEM/USDT': 'linear',
'BTC/USD': 'inverse',
'ETH/USD': 'inverse',
'EOS/USD': 'inverse',
'XRP/USD': 'inverse',
},
'defaultType': 'linear', # linear, inverse, futures
#
# ^
# |
# | this will be replaced with the following soon |
# |
# v
#
# 'defaultType': 'swap', # swap, spot, future, option
'code': 'BTC',
'cancelAllOrders': {
# 'method': 'v2PrivatePostOrderCancelAll', # v2PrivatePostStopOrderCancelAll
},
'recvWindow': 5 * 1000, # 5 sec default
'timeDifference': 0, # the difference between system clock and exchange server clock
'adjustForTimeDifference': False, # controls the adjustment logic upon instantiation
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.00075,
'maker': -0.00025,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
})
def nonce(self):
return self.milliseconds() - self.options['timeDifference']
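# nonce() feeds the request-signing timestamp; subtracting
# options['timeDifference'] keeps it aligned with the exchange clock when
# options['adjustForTimeDifference'] is enabled and load_time_difference()
# has run. For example(illustrative numbers only):
#
#     local clock 1583933682448 ms, server clock ~1583933680948 ms
#     => timeDifference ~ 1500 ms => nonce() ~ server time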
async def fetch_time(self, params={}):
response = await self.publicGetV2PublicTime(params)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: {},
# time_now: '1583933682.448826'
# }
#
return self.safe_timestamp(response, 'time_now')
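# 'time_now' is a string of unix seconds with sub-second precision;
# safe_timestamp converts it to integer milliseconds, so for the response
# shown above fetch_time() returns 1583933682448.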
async def fetch_markets(self, params={}):
if self.options['adjustForTimeDifference']:
await self.load_time_difference()
response = await self.publicGetV2PublicSymbols(params)
#
# linear swaps and inverse swaps and futures
# swapsResponse = await self.publicGetV2PublicSymbols(params)
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":[
# # inverse swap
# {
# "name":"BTCUSD",
# "alias":"BTCUSD",
# "status":"Trading",
# "base_currency":"BTC",
# "quote_currency":"USD",
# "price_scale":2,
# "taker_fee":"0.00075",
# "maker_fee":"-0.00025",
# "leverage_filter":{"min_leverage":1,"max_leverage":100,"leverage_step":"0.01"},
# "price_filter":{"min_price":"0.5","max_price":"999999","tick_size":"0.5"},
# "lot_size_filter":{"max_trading_qty":1000000,"min_trading_qty":1,"qty_step":1}
# },
# # linear swap
# {
# "name":"BTCUSDT",
# "alias":"BTCUSDT",
# "status":"Trading",
# "base_currency":"BTC",
# "quote_currency":"USDT",
# "price_scale":2,
# "taker_fee":"0.00075",
# "maker_fee":"-0.00025",
# "leverage_filter":{"min_leverage":1,"max_leverage":100,"leverage_step":"0.01"},
# "price_filter":{"min_price":"0.5","max_price":"999999","tick_size":"0.5"},
# "lot_size_filter":{"max_trading_qty":100,"min_trading_qty":0.001, "qty_step":0.001}
# },
# # inverse futures
# {
# "name":"BTCUSDM22",
# "alias":"BTCUSD0624",
# "status":"Trading",
# "base_currency":"BTC",
# "quote_currency":"USD",
# "price_scale":2,
# "taker_fee":"0.00075",
# "maker_fee":"-0.00025",
# "leverage_filter":{"min_leverage":1,"max_leverage":100,"leverage_step":"0.01"},
# "price_filter":{"min_price":"0.5","max_price":"999999","tick_size":"0.5"},
# "lot_size_filter":{"max_trading_qty":1000000,"min_trading_qty":1,"qty_step":1}
# },
# {
# "name":"BTCUSDH22",
# "alias":"BTCUSD0325",
# "status":"Trading",
# "base_currency":"BTC",
# "quote_currency":"USD",
# "price_scale":2,
# "taker_fee":"0.00075",
# "maker_fee":"-0.00025",
# "leverage_filter":{"min_leverage":1,"max_leverage":100,"leverage_step":"0.01"}
# "price_filter":{"min_price":"0.5","max_price":"999999","tick_size":"0.5"},
# "lot_size_filter":{"max_trading_qty":1000000,"min_trading_qty":1,"qty_step":1}
# }
# ],
# "time_now":"1642369942.072113"
# }
#
# spot markets
# spotResponse = await self.publicGetSpotV1Symbols(params)
#
# {
# "ret_code":0,
# "ret_msg":"",
# "ext_code":null,
# "ext_info":null,
# "result":[
# {
# "name":"BTCUSDT",
# "alias":"BTCUSDT",
# "baseCurrency":"BTC",
# "quoteCurrency":"USDT",
# "basePrecision":"0.000001",
# "quotePrecision":"0.00000001",
# "minTradeQuantity":"0.000158",
# "minTradeAmount":"10",
# "maxTradeQuantity":"4",
# "maxTradeAmount":"100000",
# "minPricePrecision":"0.01",
# "category":1,
# "showStatus":true
# },
# ]
# }
#
# USDC linear options response
# linearOptionsResponse = await self.publicGetOptionUsdcOpenapiPublicV1Symbols(params)
#
# {
# "retCode":0,
# "retMsg":"success",
# "result":{
# "resultTotalSize":424,
# "cursor":"0%2C500",
# "dataList":[
# {
# "symbol":"BTC-24JUN22-300000-C",
# "status":"ONLINE",
# "baseCoin":"BTC",
# "quoteCoin":"USD",
# "settleCoin":"USDC",
# "takerFee":"0.0003",
# "makerFee":"0.0003",
# "minLeverage":"",
# "maxLeverage":"",
# "leverageStep":"",
# "minOrderPrice":"0.5",
# "maxOrderPrice":"10000000",
# "minOrderSize":"0.01",
# "maxOrderSize":"200",
# "tickSize":"0.5",
# "minOrderSizeIncrement":"0.01",
# "basicDeliveryFeeRate":"0.00015",
# "deliveryTime":"1656057600000"
# },
# {
# "symbol":"BTC-24JUN22-300000-P",
# "status":"ONLINE",
# "baseCoin":"BTC",
# "quoteCoin":"USD",
# "settleCoin":"USDC",
# "takerFee":"0.0003",
# "makerFee":"0.0003",
# "minLeverage":"",
# "maxLeverage":"",
# "leverageStep":"",
# "minOrderPrice":"0.5",
# "maxOrderPrice":"10000000",
# "minOrderSize":"0.01",
# "maxOrderSize":"200",
# "tickSize":"0.5",
# "minOrderSizeIncrement":"0.01",
# "basicDeliveryFeeRate":"0.00015",
# "deliveryTime":"1656057600000"
# },
# ]
# }
# }
#
# USDC linear perpetual swaps
# usdcLinearPerpetualSwaps = await self.publicGetPerpetualUsdcOpenapiPublicV1Symbols(params)
#
# {
# "retCode":0,
# "retMsg":"",
# "result":[
# {
# "symbol":"BTCPERP",
# "status":"ONLINE",
# "baseCoin":"BTC",
# "quoteCoin":"USD",
# "takerFeeRate":"0.00075",
# "makerFeeRate":"-0.00025",
# "minLeverage":"1",
# "maxLeverage":"100",
# "leverageStep":"0.01",
# "minPrice":"0.50",
# "maxPrice":"999999.00",
# "tickSize":"0.50",
# "maxTradingQty":"5.000",
# "minTradingQty":"0.001",
# "qtyStep":"0.001",
# "deliveryFeeRate":"",
# "deliveryTime":"0"
# }
# ]
# }
#
markets = self.safe_value(response, 'result', [])
options = self.safe_value(self.options, 'fetchMarkets', {})
linearQuoteCurrencies = self.safe_value(options, 'linear', {'USDT': True})
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string_2(market, 'name', 'symbol')
baseId = self.safe_string_2(market, 'base_currency', 'baseCoin')
quoteId = self.safe_string_2(market, 'quote_currency', 'quoteCoin')
settleId = self.safe_string(market, 'settleCoin')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
linear = (quote in linearQuoteCurrencies)
inverse = not linear
symbol = base + '/' + quote
baseQuote = base + quote
type = 'swap'
if baseQuote != id:
symbol = id
type = 'future'
lotSizeFilter = self.safe_value(market, 'lot_size_filter', {})
priceFilter = self.safe_value(market, 'price_filter', {})
precision = {
'amount': self.safe_number(lotSizeFilter, 'qty_step'),
'price': self.safe_number(priceFilter, 'tick_size'),
}
leverage = self.safe_value(market, 'leverage_filter', {})
status = self.safe_string(market, 'status')
active = None
if status is not None:
active = (status == 'Trading')
spot = (type == 'spot')
swap = (type == 'swap')
future = (type == 'future')
option = (type == 'option')
contract = swap or future or option
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'active': active,
'precision': precision,
'taker': self.safe_number(market, 'taker_fee'),
'maker': self.safe_number(market, 'maker_fee'),
'type': type,
'spot': spot,
'margin': None, # todo
'contract': contract,
'contractSize': None, # todo
'swap': swap,
'future': future,
'futures': future, # Deprecated, use future
'option': option,
'linear': linear,
'inverse': inverse,
'expiry': None, # todo
'expiryDatetime': None, # todo
'optionType': None,
'strike': None,
'limits': {
'amount': {
'min': self.safe_number(lotSizeFilter, 'min_trading_qty'),
'max': self.safe_number(lotSizeFilter, 'max_trading_qty'),
},
'price': {
'min': self.safe_number(priceFilter, 'min_price'),
'max': self.safe_number(priceFilter, 'max_price'),
},
'cost': {
'min': None,
'max': None,
},
'leverage': {
'max': self.safe_number(leverage, 'max_leverage', 1),
},
},
'info': market,
})
return result
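# Classification logic used above, shown on sample ids(illustrative):
#
#     id 'BTCUSD'    -> base + quote == id        -> inverse swap,   symbol 'BTC/USD'
#     id 'BTCUSDT'   -> quote in the linear list  -> linear swap,    symbol 'BTC/USDT'
#     id 'BTCUSDM22' -> base + quote != id        -> inverse future, symbol is the raw id
#
# linear vs inverse is decided by whether the quote currency appears in
# options['fetchMarkets']['linear'](default {'USDT': True}), and any market
# whose concatenated base + quote differs from the id is treated as a dated
# future rather than a perpetual swap.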
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
#
timestamp = None
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
last = self.safe_string(ticker, 'last_price')
open = self.safe_string(ticker, 'prev_price_24h')
percentage = self.safe_string(ticker, 'price_24h_pcnt')
percentage = Precise.string_mul(percentage, '100')
baseVolume = self.safe_string(ticker, 'turnover_24h')
quoteVolume = self.safe_string(ticker, 'volume_24h')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high_price_24h'),
'low': self.safe_string(ticker, 'low_price_24h'),
'bid': self.safe_string(ticker, 'bid_price'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'ask_price'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market, False)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.publicGetV2PublicTickers(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
# ],
# time_now: '1583948195.818255'
# }
#
result = self.safe_value(response, 'result', [])
first = self.safe_value(result, 0)
timestamp = self.safe_timestamp(response, 'time_now')
ticker = self.parse_ticker(first, market)
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
return ticker
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetV2PublicTickers(params)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
# ],
# time_now: '1583948195.818255'
# }
#
result = self.safe_value(response, 'result', [])
tickers = {}
for i in range(0, len(result)):
ticker = self.parse_ticker(result[i])
symbol = ticker['symbol']
tickers[symbol] = ticker
return self.filter_by_array(tickers, 'symbol', symbols)
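# Each raw ticker is parsed and keyed by its unified symbol, then the
# optional symbols argument narrows the result via filter_by_array; unlike
# fetch_ticker(), the response-level 'time_now' timestamp is not injected
# into the individual tickers here.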
def parse_ohlcv(self, ohlcv, market=None):
#
# inverse perpetual BTC/USD
#
# {
# symbol: 'BTCUSD',
# interval: '1',
# open_time: 1583952540,
# open: '7760.5',
# high: '7764',
# low: '7757',
# close: '7763.5',
# volume: '1259766',
# turnover: '162.32773718999994'
# }
#
# linear perpetual BTC/USDT
#
# {
# "id":143536,
# "symbol":"BTCUSDT",
# "period":"15",
# "start_at":1587883500,
# "volume":1.035,
# "open":7540.5,
# "high":7541,
# "low":7540.5,
# "close":7541
# }
#
return [
self.safe_timestamp_2(ohlcv, 'open_time', 'start_at'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number_2(ohlcv, 'volume', 'turnover'),
]
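# The returned row follows the unified ccxt OHLCV layout
# [timestamp(ms), open, high, low, close, volume]: 'open_time' / 'start_at'
# are unix seconds converted to milliseconds, and the volume column prefers
# the 'volume' field, falling back to 'turnover' only when 'volume' is absent.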
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
price = self.safe_string(params, 'price')
params = self.omit(params, 'price')
request = {
'symbol': market['id'],
'interval': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
now = self.seconds()
if since is None:
if limit is None:
raise ArgumentsRequired(self.id + ' fetchOHLCV() requires a since argument or a limit argument')
else:
request['from'] = now - limit * duration
else:
request['from'] = int(since / 1000)
if limit is not None:
request['limit'] = limit # max 200, default 200
method = 'publicGetV2PublicKlineList'
if price == 'mark':
method = 'publicGetV2PublicMarkPriceKline'
elif price == 'index':
method = 'publicGetV2PublicIndexPriceKline'
elif price == 'premiumIndex':
method = 'publicGetV2PublicPremiumIndexKline'
elif market['linear']:
method = 'publicGetPublicLinearKline'
response = await getattr(self, method)(self.extend(request, params))
#
# inverse perpetual BTC/USD
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# interval: '1',
# open_time: 1583952540,
# open: '7760.5',
# high: '7764',
# low: '7757',
# close: '7763.5',
# volume: '1259766',
# turnover: '162.32773718999994'
# },
# ],
# time_now: '1583953082.397330'
# }
#
# linear perpetual BTC/USDT
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":[
# {
# "id":143536,
# "symbol":"BTCUSDT",
# "period":"15",
# "start_at":1587883500,
# "volume":1.035,
# "open":7540.5,
# "high":7541,
# "low":7540.5,
# "close":7541
# }
# ],
# "time_now":"1587884120.168077"
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_ohlcvs(result, market, timeframe, since, limit)
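# Window selection above, with illustrative numbers: for timeframe '1h'
# (duration 3600 seconds) and limit 200 without a since argument, the
# request sends from = now - 200 * 3600(in seconds); when since is given,
# its millisecond value is converted to seconds via int(since / 1000).
# The 'price' param('mark', 'index', 'premiumIndex') only selects the
# endpoint and is removed from the outgoing query.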
async def fetch_funding_rate(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = 'publicLinearGetFundingPrevFundingRate' if market['linear'] else 'v2PublicGetFundingPrevFundingRate'
# TODO method = 'publicGetPublicLinearFundingPrevFundingRate' if market['linear'] else 'publicGetV2PublicFundingRate ???? throws ExchangeError'
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":{
# "symbol":"BTCUSDT",
# "funding_rate":0.00006418,
# "funding_rate_timestamp":"2022-03-11T16:00:00.000Z"
# },
# "time_now":"1647040818.724895"
# }
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":{
# "symbol":"BTCUSD",
# "funding_rate":"0.00009536",
# "funding_rate_timestamp":1647014400
# },
# "time_now":"1647040852.515724"
# }
#
result = self.safe_value(response, 'result')
fundingRate = self.safe_number(result, 'funding_rate')
fundingTimestamp = self.parse8601(self.safe_string(result, 'funding_rate_timestamp'))
fundingTimestamp = self.safe_timestamp(result, 'funding_rate_timestamp', fundingTimestamp)
currentTime = self.milliseconds()
return {
'info': result,
'symbol': symbol,
'markPrice': None,
'indexPrice': None,
'interestRate': None,
'estimatedSettlePrice': None,
'timestamp': currentTime,
'datetime': self.iso8601(currentTime),
'fundingRate': fundingRate,
'fundingTimestamp': fundingTimestamp,
'fundingDatetime': self.iso8601(fundingTimestamp),
'nextFundingRate': None,
'nextFundingTimestamp': None,
'nextFundingDatetime': None,
'previousFundingRate': None,
'previousFundingTimestamp': None,
'previousFundingDatetime': None,
}
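# The two response shapes above explain the double parsing of
# 'funding_rate_timestamp': linear contracts return an ISO8601 string
# (handled by parse8601), inverse contracts return unix seconds(handled by
# the safe_timestamp fallback), so one of the two calls yields the value
# and the other leaves it untouched.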
async def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchIndexOHLCV() requires a since argument or a limit argument')
request = {
'price': 'index',
}
return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
async def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchMarkOHLCV() requires a since argument or a limit argument')
request = {
'price': 'mark',
}
return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
async def fetch_premium_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchPremiumIndexOHLCV() requires a since argument or a limit argument')
request = {
'price': 'premiumIndex',
}
return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id": "44275042152",
# "symbol": "AAVEUSDT",
# "price": "256.35",
# "qty": "0.1",
# "side": "Buy",
# "time": "2021-11-30T12:46:14.000Z",
# "trade_time_ms": "1638276374312"
# }
#
# fetchMyTrades, fetchOrderTrades(private)
#
# {
# "order_id": "b020b4bc-6fe2-45b5-adbc-dd07794f9746",
# "order_link_id": "",
# "side": "Buy",
# "symbol": "AAVEUSDT",
# "exec_id": "09abe8f0-aea6-514e-942b-7da8cb935120",
# "price": "269.3",
# "order_price": "269.3",
# "order_qty": "0.1",
# "order_type": "Market",
# "fee_rate": "0.00075",
# "exec_price": "256.35",
# "exec_type": "Trade",
# "exec_qty": "0.1",
# "exec_fee": "0.01922625",
# "exec_value": "25.635",
# "leaves_qty": "0",
# "closed_size": "0",
# "last_liquidity_ind": "RemovedLiquidity",
# "trade_time": "1638276374",
# "trade_time_ms": "1638276374312"
# }
#
id = self.safe_string_2(trade, 'id', 'exec_id')
marketId = self.safe_string(trade, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
amountString = self.safe_string_2(trade, 'qty', 'exec_qty')
priceString = self.safe_string_2(trade, 'exec_price', 'price')
costString = self.safe_string(trade, 'exec_value')
timestamp = self.parse8601(self.safe_string(trade, 'time'))
if timestamp is None:
timestamp = self.safe_integer(trade, 'trade_time_ms')
side = self.safe_string_lower(trade, 'side')
lastLiquidityInd = self.safe_string(trade, 'last_liquidity_ind')
takerOrMaker = 'maker' if (lastLiquidityInd == 'AddedLiquidity') else 'taker'
feeCostString = self.safe_string(trade, 'exec_fee')
fee = None
if feeCostString is not None:
feeCurrencyCode = market['base'] if market['inverse'] else market['quote']
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
'rate': self.safe_string(trade, 'fee_rate'),
}
return self.safe_trade({
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': self.safe_string(trade, 'order_id'),
'type': self.safe_string_lower(trade, 'order_type'),
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
}, market)
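# Fee currency follows the contract type: inverse contracts(e.g. BTC/USD)
# charge fees in the base coin, linear contracts(e.g. BTC/USDT) in the
# quote coin. Public trades carry an ISO 'time' field while private
# executions only have 'trade_time_ms', hence the timestamp fallback above.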
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'from': 123, # from id
}
if limit is not None:
request['count'] = limit # default 500, max 1000
method = 'publicGetPublicLinearRecentTradingRecords' if market['linear'] else 'publicGetV2PublicTradingRecords'
response = await getattr(self, method)(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# id: 43785688,
# symbol: 'BTCUSD',
# price: 7786,
# qty: 67,
# side: 'Sell',
# time: '2020-03-11T19:18:30.123Z'
# },
# ],
# time_now: '1583954313.393362'
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_trades(result, market, since, limit)
def parse_order_book(self, orderbook, symbol, timestamp=None, bidsKey='Buy', asksKey='Sell', priceKey='price', amountKey='size'):
bids = []
asks = []
for i in range(0, len(orderbook)):
bidask = orderbook[i]
side = self.safe_string(bidask, 'side')
if side == 'Buy':
bids.append(self.parse_bid_ask(bidask, priceKey, amountKey))
elif side == 'Sell':
asks.append(self.parse_bid_ask(bidask, priceKey, amountKey))
else:
raise ExchangeError(self.id + ' parseOrderBook encountered an unrecognized bidask format: ' + self.json(bidask))
return {
'symbol': symbol,
'bids': self.sort_by(bids, 0, True),
'asks': self.sort_by(asks, 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
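# The exchange returns one flat list of levels, each tagged with a 'side'
# of 'Buy' or 'Sell'; this override splits them into bids and asks and
# sorts bids descending / asks ascending by price, for example
# (illustrative rows):
#
#     {price: '7767.5', size: 677956, side: 'Buy'}  -> bids
#     {price: '7768',   size: 330847, side: 'Sell'} -> asks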
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.publicGetV2PublicOrderBookL2(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {symbol: 'BTCUSD', price: '7767.5', size: 677956, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7767', size: 580690, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7766.5', size: 475252, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7768', size: 330847, side: 'Sell'},
# {symbol: 'BTCUSD', price: '7768.5', size: 97159, side: 'Sell'},
# {symbol: 'BTCUSD', price: '7769', size: 6508, side: 'Sell'},
# ],
# time_now: '1583954829.874823'
# }
#
result = self.safe_value(response, 'result', [])
timestamp = self.safe_timestamp(response, 'time_now')
return self.parse_order_book(result, symbol, timestamp, 'Buy', 'Sell', 'price', 'size')
def parse_balance(self, response):
result = {
'info': response,
}
balances = self.safe_value(response, 'result', {})
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
balance = balances[currencyId]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available_balance')
account['used'] = self.safe_string(balance, 'used_margin')
account['total'] = self.safe_string(balance, 'equity')
result[code] = account
return self.safe_balance(result)
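# Wallet fields are mapped onto the unified balance structure per coin:
# 'available_balance' -> free, 'used_margin' -> used, 'equity' -> total.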
async def fetch_balance(self, params={}):
# note: any funds in the 'spot' account will not be returned or visible from this endpoint
await self.load_markets()
request = {}
coin = self.safe_string(params, 'coin')
code = self.safe_string(params, 'code')
if coin is not None:
request['coin'] = coin
elif code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
response = await self.v2PrivateGetWalletBalance(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: {
# BTC: {
# equity: 0,
# available_balance: 0,
# used_margin: 0,
# order_margin: 0,
# position_margin: 0,
# occ_closing_fee: 0,
# occ_funding_fee: 0,
# wallet_balance: 0,
# realised_pnl: 0,
# unrealised_pnl: 0,
# cum_realised_pnl: 0,
# given_cash: 0,
# service_cash: 0
# }
# },
# time_now: '1583937810.370020',
# rate_limit_status: 119,
# rate_limit_reset_ms: 1583937810367,
# rate_limit: 120
# }
#
return self.parse_balance(response)
def parse_order_status(self, status):
statuses = {
# basic orders
'Created': 'open',
'Rejected': 'rejected', # order is triggered but failed upon being placed
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'Cancelled': 'canceled',
'PendingCancel': 'canceling', # the engine has received the cancellation but there is no guarantee that it will be successful
# conditional orders
'Active': 'open', # order is triggered and placed successfully
'Untriggered': 'open', # order waits to be triggered
'Triggered': 'closed', # order is triggered
# 'Cancelled': 'canceled', # order is cancelled
# 'Rejected': 'rejected', # order is triggered but fail to be placed
'Deactivated': 'canceled', # conditional order was cancelled before triggering
}
return self.safe_string(statuses, status, status)
def parse_time_in_force(self, timeInForce):
timeInForces = {
'GoodTillCancel': 'GTC',
'ImmediateOrCancel': 'IOC',
'FillOrKill': 'FOK',
'PostOnly': 'PO',
}
return self.safe_string(timeInForces, timeInForce, timeInForce)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "user_id": 1,
# "order_id": "335fd977-e5a5-4781-b6d0-c772d5bfb95b",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8800,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Created",
# "last_exec_time": 0,
# "last_exec_price": 0,
# "leaves_qty": 1,
# "cum_exec_qty": 0, # in contracts, where 1 contract = 1 quote currency unit(USD for inverse contracts)
# "cum_exec_value": 0, # in contract's underlying currency(BTC for inverse contracts)
# "cum_exec_fee": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-11-30T11:03:43.452Z",
# "updated_at": "2019-11-30T11:03:43.455Z"
# }
#
# fetchOrder
#
# {
# "user_id" : 599946,
# "symbol" : "BTCUSD",
# "side" : "Buy",
# "order_type" : "Limit",
# "price" : "7948",
# "qty" : 10,
# "time_in_force" : "GoodTillCancel",
# "order_status" : "Filled",
# "ext_fields" : {
# "o_req_num" : -1600687220498,
# "xreq_type" : "x_create"
# },
# "last_exec_time" : "1588150113.968422",
# "last_exec_price" : "7948",
# "leaves_qty" : 0,
# "leaves_value" : "0",
# "cum_exec_qty" : 10,
# "cum_exec_value" : "0.00125817",
# "cum_exec_fee" : "-0.00000031",
# "reject_reason" : "",
# "cancel_type" : "",
# "order_link_id" : "",
# "created_at" : "2020-04-29T08:45:24.399146Z",
# "updated_at" : "2020-04-29T08:48:33.968422Z",
# "order_id" : "dd2504b9-0157-406a-99e1-efa522373944"
# }
#
# conditional order
#
# {
# "user_id":##,
# "symbol":"BTCUSD",
# "side":"Buy",
# "order_type":"Market",
# "price":0,
# "qty":10,
# "time_in_force":"GoodTillCancel",
# "stop_order_type":"Stop",
# "trigger_by":"LastPrice",
# "base_price":11833,
# "order_status":"Untriggered",
# "ext_fields":{
# "stop_order_type":"Stop",
# "trigger_by":"LastPrice",
# "base_price":11833,
# "expected_direction":"Rising",
# "trigger_price":12400,
# "close_on_trigger":true,
# "op_from":"api",
# "remark":"x.x.x.x",
# "o_req_num":0
# },
# "leaves_qty":10,
# "leaves_value":0.00080645,
# "reject_reason":null,
# "cross_seq":-1,
# "created_at":"2020-08-21T09:18:48.000Z",
# "updated_at":"2020-08-21T09:18:48.000Z",
# "trigger_price":12400,
# "stop_order_id":"3f3b54b1-3379-42c7-8510-44f4d9915be0"
# }
#
marketId = self.safe_string(order, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
feeCurrency = None
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
id = self.safe_string_2(order, 'order_id', 'stop_order_id')
type = self.safe_string_lower(order, 'order_type')
price = None
if type != 'market':
price = self.safe_string(order, 'price')
average = self.safe_string(order, 'average_price')
amount = self.safe_string(order, 'qty')
cost = self.safe_string(order, 'cum_exec_value')
filled = self.safe_string(order, 'cum_exec_qty')
remaining = self.safe_string(order, 'leaves_qty')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol)
if marketType == 'linear':
feeCurrency = market['quote']
else:
feeCurrency = market['base']
lastTradeTimestamp = self.safe_timestamp(order, 'last_exec_time')
if lastTradeTimestamp == 0:
lastTradeTimestamp = None
status = self.parse_order_status(self.safe_string_2(order, 'order_status', 'stop_order_status'))
side = self.safe_string_lower(order, 'side')
feeCostString = self.safe_string(order, 'cum_exec_fee')
fee = None
if feeCostString is not None:
fee = {
'cost': feeCostString,
'currency': feeCurrency,
}
clientOrderId = self.safe_string(order, 'order_link_id')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None
timeInForce = self.parse_time_in_force(self.safe_string(order, 'time_in_force'))
stopPrice = self.safe_number_2(order, 'trigger_price', 'stop_px')
postOnly = (timeInForce == 'PO')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}, market)
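# The same parser covers regular and conditional orders: the id comes from
# 'order_id' or 'stop_order_id', the status from 'order_status' or
# 'stop_order_status', and the stopPrice from 'trigger_price' or 'stop_px'.
# Market orders drop the price field, and the fee currency is chosen with
# the same linear(quote) / inverse(base) rule used for trades.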
async def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'order_link_id': 'string', # one of order_id, stop_order_id or order_link_id is required
# regular orders ---------------------------------------------
# 'order_id': id, # one of order_id or order_link_id is required for regular orders
# conditional orders ---------------------------------------------
# 'stop_order_id': id, # one of stop_order_id or order_link_id is required for conditional orders
}
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearGetOrderSearch'
elif market['inverse']:
method = 'v2PrivateGetOrder'
elif market['future']:
method = 'futuresPrivateGetOrder'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is None:
orderLinkId = self.safe_string(params, 'order_link_id')
if orderLinkId is None:
request['order_id'] = id
else:
if market['swap']:
if market['linear']:
method = 'privateLinearGetStopOrderSearch'
elif market['inverse']:
method = 'v2PrivateGetStopOrder'
elif market['future']:
method = 'futuresPrivateGetStopOrder'
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Sell",
# "order_type": "Limit",
# "price": "8083",
# "qty": 10,
# "time_in_force": "GoodTillCancel",
# "order_status": "New",
# "ext_fields": {"o_req_num": -308787, "xreq_type": "x_create", "xreq_offset": 4154640},
# "leaves_qty": 10,
# "leaves_value": "0.00123716",
# "cum_exec_qty": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-10-21T07:28:19.396246Z",
# "updated_at": "2019-10-21T07:28:19.396246Z",
# "order_id": "efa44157-c355-4a98-b6d6-1d846a936b93"
# },
# "time_now": "1571651135.291930",
# "rate_limit_status": 99, # The remaining number of accesses in one minute
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": "8000",
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Untriggered",
# "ext_fields": {},
# "leaves_qty": 1,
# "leaves_value": "0.00013333",
# "cum_exec_qty": 0,
# "cum_exec_value": null,
# "cum_exec_fee": null,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-12-27T19:56:24.052194Z",
# "updated_at": "2019-12-27T19:56:24.052194Z",
# "order_id": "378a1bbc-a93a-4e75-87f4-502ea754ba36"
# },
# "time_now": "1577476584.386958",
# "rate_limit_status": 99,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
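# Endpoint routing: linear swaps, inverse swaps and inverse futures each
# have their own private GET, and passing 'stop_order_id' in params
# switches to the conditional-order search instead, for example
# (illustrative call):
#
#     await exchange.fetch_order(id, 'BTC/USD', {'stop_order_id': id})
#
# Without 'stop_order_id' or 'order_link_id' in params, the id argument is
# sent as 'order_id'.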
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
qty = self.amount_to_precision(symbol, amount)
if market['inverse']:
qty = int(qty)
else:
qty = float(qty)
request = {
# orders ---------------------------------------------------------
'side': self.capitalize(side),
'symbol': market['id'],
'order_type': self.capitalize(type),
'qty': qty, # order quantity in USD, integer only
# 'price': float(self.price_to_precision(symbol, price)), # required for limit orders
'time_in_force': 'GoodTillCancel', # ImmediateOrCancel, FillOrKill, PostOnly
# 'take_profit': 123.45, # take profit price, only take effect upon opening the position
# 'stop_loss': 123.45, # stop loss price, only take effect upon opening the position
# 'reduce_only': False, # reduce only, required for linear orders
# when creating a closing order, bybit recommends a True value for
# close_on_trigger to avoid failing due to insufficient available margin
# 'close_on_trigger': False, required for linear orders
# 'order_link_id': 'string', # unique client order id, max 36 characters
# conditional orders ---------------------------------------------
# base_price is used to compare with the value of stop_px, to decide
# whether your conditional order will be triggered by crossing trigger
# price from upper side or lower side, mainly used to identify the
# expected direction of the current conditional order
# 'base_price': 123.45, # required for conditional orders
# 'stop_px': 123.45, # trigger price, required for conditional orders
# 'trigger_by': 'LastPrice', # IndexPrice, MarkPrice
}
priceIsRequired = False
if type == 'limit':
priceIsRequired = True
if priceIsRequired:
if price is not None:
request['price'] = float(self.price_to_precision(symbol, price))
else:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument for a ' + type + ' order')
clientOrderId = self.safe_string_2(params, 'order_link_id', 'clientOrderId')
if clientOrderId is not None:
request['order_link_id'] = clientOrderId
params = self.omit(params, ['order_link_id', 'clientOrderId'])
stopPx = self.safe_value_2(params, 'stop_px', 'stopPrice')
basePrice = self.safe_value(params, 'base_price')
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearPostOrderCreate'
request['reduce_only'] = False
request['close_on_trigger'] = False
elif market['inverse']:
method = 'v2PrivatePostOrderCreate'
elif market['future']:
method = 'futuresPrivatePostOrderCreate'
if stopPx is not None:
if basePrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires both the stop_px and base_price params for a conditional ' + type + ' order')
else:
if market['swap']:
if market['linear']:
method = 'privateLinearPostStopOrderCreate'
elif market['inverse']:
method = 'v2PrivatePostStopOrderCreate'
elif market['future']:
method = 'futuresPrivatePostStopOrderCreate'
request['stop_px'] = float(self.price_to_precision(symbol, stopPx))
request['base_price'] = float(self.price_to_precision(symbol, basePrice))
request['trigger_by'] = 'LastPrice'
params = self.omit(params, ['stop_px', 'stopPrice', 'base_price', 'trigger_by'])
elif basePrice is not None:
raise ArgumentsRequired(self.id + ' createOrder() requires both the stop_px and base_price params for a conditional ' + type + ' order')
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "order_id": "335fd977-e5a5-4781-b6d0-c772d5bfb95b",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8800,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Created",
# "last_exec_time": 0,
# "last_exec_price": 0,
# "leaves_qty": 1,
# "cum_exec_qty": 0,
# "cum_exec_value": 0,
# "cum_exec_fee": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-11-30T11:03:43.452Z",
# "updated_at": "2019-11-30T11:03:43.455Z"
# },
# "time_now": "1575111823.458705",
# "rate_limit_status": 98,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8000,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "order_status": "Untriggered",
# "ext_fields": {
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "expected_direction": "Rising",
# "trigger_price": 7500,
# "op_from": "api",
# "remark": "127.0.01",
# "o_req_num": 0
# },
# "leaves_qty": 1,
# "leaves_value": 0.00013333,
# "reject_reason": null,
# "cross_seq": -1,
# "created_at": "2019-12-27T12:48:24.000Z",
# "updated_at": "2019-12-27T12:48:24.000Z",
# "stop_px": 7500,
# "stop_order_id": "a85cd1c0-a9a4-49d3-a1bd-bab5ebe946d5"
# },
# "ext_info": null,
# "time_now": "1577450904.327654",
# "rate_limit_status": 99,
# "rate_limit_reset_ms": 1577450904335,
# "rate_limit": "100"
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
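# Usage sketch with illustrative values:
#
#     await exchange.create_order('BTC/USD', 'limit', 'buy', 1, 8000)  # inverse, qty is an integer number of contracts
#     await exchange.create_order('BTC/USDT', 'market', 'sell', 0.01, None, {
#         'stop_px': 37000,     # trigger price -> routes to the conditional-order endpoint
#         'base_price': 38000,  # reference price for the expected trigger direction
#     })
#
# Inverse quantities are cast to int, linear quantities to float, and
# linear swap orders always send the reduce_only / close_on_trigger defaults.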
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' editOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
# 'order_id': id, # only for non-conditional orders
'symbol': market['id'],
# 'p_r_qty': self.amount_to_precision(symbol, amount), # new order quantity, optional
# 'p_r_price' self.priceToprecision(symbol, price), # new order price, optional
# ----------------------------------------------------------------
# conditional orders
# 'stop_order_id': id, # only for conditional orders
# 'p_r_trigger_price': 123.45, # new trigger price also known as stop_px
}
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearPostOrderReplace'
elif market['inverse']:
method = 'v2PrivatePostOrderReplace'
elif market['future']:
method = 'futuresPrivatePostOrderReplace'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is not None:
if market['swap']:
if market['linear']:
method = 'privateLinearPostStopOrderReplace'
elif market['inverse']:
method = 'v2PrivatePostStopOrderReplace'
elif market['future']:
method = 'futuresPrivatePostStopOrderReplace'
request['stop_order_id'] = stopOrderId
params = self.omit(params, ['stop_order_id'])
else:
request['order_id'] = id
if amount is not None:
qty = self.amount_to_precision(symbol, amount)
if market['inverse']:
qty = int(qty)
else:
qty = float(qty)
request['p_r_qty'] = qty
if price is not None:
request['p_r_price'] = float(self.price_to_precision(symbol, price))
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {"order_id": "efa44157-c355-4a98-b6d6-1d846a936b93"},
# "time_now": "1539778407.210858",
# "rate_limit_status": 99, # remaining number of accesses in one minute
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {"stop_order_id": "378a1bbc-a93a-4e75-87f4-502ea754ba36"},
# "ext_info": null,
# "time_now": "1577475760.604942",
# "rate_limit_status": 96,
# "rate_limit_reset_ms": 1577475760612,
# "rate_limit": "100"
# }
#
result = self.safe_value(response, 'result', {})
return {
'info': response,
'id': self.safe_string_2(result, 'order_id', 'stop_order_id'),
'order_id': self.safe_string(result, 'order_id'),
'stop_order_id': self.safe_string(result, 'stop_order_id'),
}
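# edit_order() only submits the fields that change(p_r_qty, p_r_price and,
# for conditional orders, stop_order_id); the exchange replies with just
# the order id, so the returned structure is minimal rather than a fully
# parsed order.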
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'order_link_id': 'string', # one of order_id, stop_order_id or order_link_id is required
# regular orders ---------------------------------------------
# 'order_id': id, # one of order_id or order_link_id is required for regular orders
# conditional orders ---------------------------------------------
# 'stop_order_id': id, # one of stop_order_id or order_link_id is required for conditional orders
}
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearPostOrderCancel'
elif market['inverse']:
method = 'v2PrivatePostOrderCancel'
elif market['future']:
method = 'futuresPrivatePostOrderCancel'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is None:
orderLinkId = self.safe_string(params, 'order_link_id')
if orderLinkId is None:
request['order_id'] = id
else:
if market['swap']:
if market['linear']:
method = 'privateLinearPostStopOrderCancel'
elif market['inverse']:
method = 'v2PrivatePostStopOrderCancel'
elif market['future']:
method = 'futuresPrivatePostStopOrderCancel'
response = await getattr(self, method)(self.extend(request, params))
result = self.safe_value(response, 'result', {})
return self.parse_order(result, market)
async def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
options = self.safe_value(self.options, 'cancelAllOrders', {})
defaultMethod = None
if market['swap']:
if market['linear']:
defaultMethod = 'privateLinearPostOrderCancelAll'
elif market['inverse']:
defaultMethod = 'v2PrivatePostOrderCancelAll'
elif market['future']:
defaultMethod = 'futuresPrivatePostOrderCancelAll'
method = self.safe_string(options, 'method', defaultMethod)
response = await getattr(self, method)(self.extend(request, params))
result = self.safe_value(response, 'result', [])
return self.parse_orders(result, market)
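# The endpoint can be overridden through options['cancelAllOrders']['method'],
# e.g. with 'v2PrivatePostStopOrderCancelAll' to cancel conditional orders
# instead of regular ones(see the commented default in the options above).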
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'order_id': 'string'
# 'order_link_id': 'string', # unique client order id, max 36 characters
# 'symbol': market['id'], # default BTCUSD
# 'order': 'desc', # asc
# 'page': 1,
# 'limit': 20, # max 50
# 'order_status': 'Created,New'
# conditional orders ---------------------------------------------
# 'stop_order_id': 'string',
# 'stop_order_status': 'Untriggered',
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit
options = self.safe_value(self.options, 'fetchOrders', {})
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
defaultMethod = None
marketDefined = (market is not None)
linear = (marketDefined and market['linear']) or (marketType == 'linear')
inverse = (marketDefined and market['swap'] and market['inverse']) or (marketType == 'inverse')
future = (marketDefined and market['future']) or ((marketType == 'future') or (marketType == 'futures')) # * (marketType == 'futures') deprecated, use(marketType == 'future')
if linear:
defaultMethod = 'privateLinearGetOrderList'
elif inverse:
defaultMethod = 'v2PrivateGetOrderList'
elif future:
defaultMethod = 'futuresPrivateGetOrderList'
query = params
if ('stop_order_id' in params) or ('stop_order_status' in params):
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is not None:
if isinstance(stopOrderStatus, list):
stopOrderStatus = ','.join(stopOrderStatus)
request['stop_order_status'] = stopOrderStatus
query = self.omit(params, 'stop_order_status')
if linear:
defaultMethod = 'privateLinearGetStopOrderList'
elif inverse:
defaultMethod = 'v2PrivateGetStopOrderList'
elif future:
defaultMethod = 'futuresPrivateGetStopOrderList'
method = self.safe_string(options, 'method', defaultMethod)
response = await getattr(self, method)(self.extend(request, query))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "current_page": 1,
# "last_page": 6,
# "data": [
# {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Sell",
# "order_type": "Market",
# "price": 7074,
# "qty": 2,
# "time_in_force": "ImmediateOrCancel",
# "order_status": "Filled",
# "ext_fields": {
# "close_on_trigger": True,
# "orig_order_type": "BLimit",
# "prior_x_req_price": 5898.5,
# "op_from": "pc",
# "remark": "127.0.0.1",
# "o_req_num": -34799032763,
# "xreq_type": "x_create"
# },
# "last_exec_time": "1577448481.696421",
# "last_exec_price": 7070.5,
# "leaves_qty": 0,
# "leaves_value": 0,
# "cum_exec_qty": 2,
# "cum_exec_value": 0.00028283,
# "cum_exec_fee": 0.00002,
# "reject_reason": "NoError",
# "order_link_id": "",
# "created_at": "2019-12-27T12:08:01.000Z",
# "updated_at": "2019-12-27T12:08:01.000Z",
# "order_id": "f185806b-b801-40ff-adec-52289370ed62"
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577448922.437871",
# "rate_limit_status": 98,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "current_page": 1,
# "last_page": 1,
# "data": [
# {
# "user_id": 1,
# "stop_order_status": "Untriggered",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8000,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "order_link_id": "",
# "created_at": "2019-12-27T12:48:24.000Z",
# "updated_at": "2019-12-27T12:48:24.000Z",
# "stop_px": 7500,
# "stop_order_id": "a85cd1c0-a9a4-49d3-a1bd-bab5ebe946d5"
# },
# ]
# },
# "ext_info": null,
# "time_now": "1577451658.755468",
# "rate_limit_status": 599,
# "rate_limit_reset_ms": 1577451658762,
# "rate_limit": 600
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_orders(data, market, since, limit)
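# Regular order history is fetched by default; if params contain
# 'stop_order_id' or 'stop_order_status', the conditional(stop-order) list
# endpoints are used instead, and 'stop_order_status' may be given either
# as a string or as a list that gets joined into a comma-separated value.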
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
defaultStatuses = [
'Rejected',
'Filled',
'Cancelled',
# conditional orders
# 'Active',
# 'Triggered',
# 'Cancelled',
# 'Rejected',
# 'Deactivated',
]
options = self.safe_value(self.options, 'fetchClosedOrders', {})
status = self.safe_value(options, 'order_status', defaultStatuses)
if isinstance(status, list):
status = ','.join(status)
request = {}
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is None:
request['order_status'] = status
else:
request['stop_order_status'] = stopOrderStatus
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
defaultStatuses = [
'Created',
'New',
'PartiallyFilled',
'PendingCancel',
# conditional orders
# 'Untriggered',
]
options = self.safe_value(self.options, 'fetchOpenOrders', {})
status = self.safe_value(options, 'order_status', defaultStatuses)
if isinstance(status, list):
status = ','.join(status)
request = {}
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is None:
request['order_status'] = status
else:
request['stop_order_status'] = stopOrderStatus
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
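# fetch_open_orders() and fetch_closed_orders() are thin wrappers around
# fetch_orders() that preselect 'order_status' values; the defaults can be
# overridden via options['fetchOpenOrders'] / options['fetchClosedOrders']
# under the 'order_status' key, and passing 'stop_order_status' switches
# both to conditional orders.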
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
request = {
'order_id': id,
}
return await self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
request = {
# 'order_id': 'f185806b-b801-40ff-adec-52289370ed62', # if not provided will return user's trading records
# 'symbol': market['id'],
# 'start_time': int(since / 1000),
# 'page': 1,
# 'limit': 20, # max 50
}
market = None
orderId = self.safe_string(params, 'order_id')
if orderId is not None:
request['order_id'] = orderId
params = self.omit(params, 'order_id')
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['start_time'] = since
if limit is not None:
request['limit'] = limit # default 20, max 50
marketType, query = self.handle_market_type_and_params('fetchMyTrades', market, params)
marketDefined = (market is not None)
linear = (marketDefined and market['linear']) or (marketType == 'linear')
inverse = (marketDefined and market['swap'] and market['inverse']) or (marketType == 'inverse')
        future = (marketDefined and market['future']) or ((marketType == 'future') or (marketType == 'futures'))  # * (marketType == 'futures') deprecated, use (marketType == 'future')
method = None
if linear:
method = 'privateLinearGetTradeExecutionList'
elif inverse:
method = 'v2PrivateGetExecutionList'
elif future:
method = 'futuresPrivateGetExecutionList'
response = await getattr(self, method)(self.extend(request, query))
#
# inverse
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "order_id": "Abandonednot !", # Abandonednot !
# "trade_list": [
# {
# "closed_size": 0,
# "cross_seq": 277136382,
# "exec_fee": "0.0000001",
# "exec_id": "256e5ef8-abfe-5772-971b-f944e15e0d68",
# "exec_price": "8178.5",
# "exec_qty": 1,
# "exec_time": "1571676941.70682",
# "exec_type": "Trade", #Exec Type Enum
# "exec_value": "0.00012227",
# "fee_rate": "0.00075",
# "last_liquidity_ind": "RemovedLiquidity", #Liquidity Enum
# "leaves_qty": 0,
# "nth_fill": 2,
# "order_id": "7ad50cb1-9ad0-4f74-804b-d82a516e1029",
# "order_link_id": "",
# "order_price": "8178",
# "order_qty": 1,
# "order_type": "Market", #Order Type Enum
# "side": "Buy", #Side Enum
# "symbol": "BTCUSD", #Symbol Enum
# "user_id": 1
# }
# ]
# },
# "time_now": "1577483699.281488",
# "rate_limit_status": 118,
# "rate_limit_reset_ms": 1577483699244737,
# "rate_limit": 120
# }
#
# linear
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":{
# "current_page":1,
# "data":[
# {
# "order_id":"b59418ec-14d4-4ef9-b9f4-721d5d576974",
# "order_link_id":"",
# "side":"Sell",
# "symbol":"BTCUSDT",
# "exec_id":"0327284d-faec-5191-bd89-acc5b4fafda9",
# "price":0.5,
# "order_price":0.5,
# "order_qty":0.01,
# "order_type":"Market",
# "fee_rate":0.00075,
# "exec_price":9709.5,
# "exec_type":"Trade",
# "exec_qty":0.01,
# "exec_fee":0.07282125,
# "exec_value":97.095,
# "leaves_qty":0,
# "closed_size":0.01,
# "last_liquidity_ind":"RemovedLiquidity",
# "trade_time":1591648052,
# "trade_time_ms":1591648052861
# }
# ]
# },
# "time_now":"1591736501.979264",
# "rate_limit_status":119,
# "rate_limit_reset_ms":1591736501974,
# "rate_limit":120
# }
#
result = self.safe_value(response, 'result', {})
trades = self.safe_value_2(result, 'trade_list', 'data', [])
return self.parse_trades(trades, market, since, limit)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'coin': currency['id'],
# 'currency': currency['id'], # alias
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
'wallet_fund_type': 'Deposit', # Deposit, Withdraw, RealisedPNL, Commission, Refund, Prize, ExchangeOrderWithdraw, ExchangeOrderDeposit
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.yyyymmdd(since)
if limit is not None:
request['limit'] = limit
response = await self.v2PrivateGetWalletFundRecords(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577481867.115552",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577481867122,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_transactions(data, currency, since, limit, {'type': 'deposit'})
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'coin': currency['id'],
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
# 'status': 'Pending', # ToBeConfirmed, UnderReview, Pending, Success, CancelByUser, Reject, Expire
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.yyyymmdd(since)
if limit is not None:
request['limit'] = limit
response = await self.v2PrivateGetWalletWithdrawList(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 137,
# "user_id": 1,
# "coin": "XRP", # Coin Enum
# "status": "Pending", # Withdraw Status Enum
# "amount": "20.00000000",
# "fee": "0.25000000",
# "address": "rH7H595XYEVTEHU2FySYsWnmfACBnZS9zM",
# "tx_id": "",
# "submited_at": "2019-06-11T02:20:24.000Z",
# "updated_at": "2019-06-11T02:20:24.000Z"
# },
# ],
# "current_page": 1,
# "last_page": 1
# },
# "ext_info": null,
# "time_now": "1577482295.125488",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577482295132,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_transactions(data, currency, since, limit, {'type': 'withdrawal'})
def parse_transaction_status(self, status):
statuses = {
'ToBeConfirmed': 'pending',
'UnderReview': 'pending',
'Pending': 'pending',
'Success': 'ok',
'CancelByUser': 'canceled',
'Reject': 'rejected',
'Expire': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchWithdrawals
#
# {
# "id": 137,
# "user_id": 1,
# "coin": "XRP", # Coin Enum
# "status": "Pending", # Withdraw Status Enum
# "amount": "20.00000000",
# "fee": "0.25000000",
# "address": "rH7H595XYEVTEHU2FySYsWnmfACBnZS9zM",
# "tx_id": "",
# "submited_at": "2019-06-11T02:20:24.000Z",
# "updated_at": "2019-06-11T02:20:24.000Z"
# }
#
# fetchDeposits ledger entries
#
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
#
currencyId = self.safe_string(transaction, 'coin')
code = self.safe_currency_code(currencyId, currency)
timestamp = self.parse8601(self.safe_string_2(transaction, 'submited_at', 'exec_time'))
updated = self.parse8601(self.safe_string(transaction, 'updated_at'))
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
address = self.safe_string(transaction, 'address')
feeCost = self.safe_number(transaction, 'fee')
type = self.safe_string_lower(transaction, 'type')
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
return {
'info': transaction,
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'tx_id'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': None,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': None,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': self.safe_number(transaction, 'amount'),
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'coin': currency['id'],
# 'currency': currency['id'], # alias
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
# 'wallet_fund_type': 'Deposit', # Withdraw, RealisedPNL, Commission, Refund, Prize, ExchangeOrderWithdraw, ExchangeOrderDeposit
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.yyyymmdd(since)
if limit is not None:
request['limit'] = limit
response = await self.v2PrivateGetWalletFundRecords(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577481867.115552",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577481867122,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_ledger(data, currency, since, limit)
def parse_ledger_entry(self, item, currency=None):
#
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
#
currencyId = self.safe_string(item, 'coin')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_number(item, 'amount')
after = self.safe_number(item, 'wallet_balance')
direction = 'out' if (amount < 0) else 'in'
before = None
if after is not None and amount is not None:
            # wallet_balance is the balance after the event, so undo the signed change to recover the prior balance
            difference = -amount
            before = self.sum(after, difference)
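        # Worked example using the ledger entry documented above (arithmetic only): amount = -0.00000006
        # with wallet_balance (after) = 0.03000330 gives before = 0.03000330 - (-0.00000006) = 0.03000336.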
timestamp = self.parse8601(self.safe_string(item, 'exec_time'))
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
id = self.safe_string(item, 'id')
referenceId = self.safe_string(item, 'tx_id')
return {
'id': id,
'currency': code,
'account': self.safe_string(item, 'wallet_id'),
'referenceAccount': None,
'referenceId': referenceId,
'status': None,
'amount': amount,
'before': before,
'after': after,
'fee': None,
'direction': direction,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'type': type,
'info': item,
}
def parse_ledger_entry_type(self, type):
types = {
'Deposit': 'transaction',
'Withdraw': 'transaction',
'RealisedPNL': 'trade',
'Commission': 'fee',
'Refund': 'cashback',
'Prize': 'prize', # ?
'ExchangeOrderWithdraw': 'transaction',
'ExchangeOrderDeposit': 'transaction',
}
return self.safe_string(types, type, type)
async def fetch_positions(self, symbols=None, params={}):
await self.load_markets()
request = {}
if isinstance(symbols, list):
length = len(symbols)
if length != 1:
                raise ArgumentsRequired(self.id + ' fetchPositions() takes an array with exactly one symbol')
request['symbol'] = self.market_id(symbols[0])
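        # Hypothetical call (not part of the original code): await exchange.fetch_positions(['BTC/USD'])
        # resolves the single allowed symbol to its market id (e.g. 'BTCUSD') for request['symbol'].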
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
response = None
if type == 'linear':
response = await self.privateLinearGetPositionList(self.extend(request, params))
elif type == 'inverse':
response = await self.v2PrivateGetPositionList(self.extend(request, params))
elif type == 'inverseFuture':
response = await self.futuresPrivateGetPositionList(self.extend(request, params))
if (isinstance(response, str)) and self.is_json_encoded_object(response):
response = json.loads(response)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [] or {} depending on the request
# }
#
return self.safe_value(response, 'result')
async def set_margin_mode(self, marginType, symbol=None, params={}):
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": null,
# "ext_info": null,
# "time_now": "1577477968.175013",
# "rate_limit_status": 74,
# "rate_limit_reset_ms": 1577477968183,
# "rate_limit": 75
# }
#
leverage = self.safe_value(params, 'leverage')
if leverage is None:
raise ArgumentsRequired(self.id + ' setMarginMode() requires a leverage parameter')
marginType = marginType.upper()
if marginType == 'CROSSED': # * Deprecated, use 'CROSS' instead
marginType = 'CROSS'
if (marginType != 'ISOLATED') and (marginType != 'CROSS'):
raise BadRequest(self.id + ' marginType must be either isolated or cross')
await self.load_markets()
market = self.market(symbol)
method = None
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
linear = market['linear'] or (marketType == 'linear')
inverse = (market['swap'] and market['inverse']) or (marketType == 'inverse')
        future = market['future'] or ((marketType == 'future') or (marketType == 'futures'))  # * (marketType == 'futures') deprecated, use (marketType == 'future')
if linear:
method = 'privateLinearPostPositionSwitchIsolated'
elif inverse:
method = 'v2PrivatePostPositionSwitchIsolated'
elif future:
method = 'privateFuturesPostPositionSwitchIsolated'
isIsolated = (marginType == 'ISOLATED')
request = {
'symbol': market['id'],
'is_isolated': isIsolated,
'buy_leverage': leverage,
'sell_leverage': leverage,
}
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": null,
# "time_now": "1585881597.006026",
# "rate_limit_status": 74,
# "rate_limit_reset_ms": 1585881597004,
# "rate_limit": 75
# }
#
return response
async def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
# WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS
# AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
linear = market['linear'] or (marketType == 'linear')
inverse = (market['swap'] and market['inverse']) or (marketType == 'inverse')
        future = market['future'] or ((marketType == 'future') or (marketType == 'futures'))  # * (marketType == 'futures') deprecated, use (marketType == 'future')
method = None
if linear:
method = 'privateLinearPostPositionSetLeverage'
elif inverse:
method = 'v2PrivatePostPositionLeverageSave'
elif future:
method = 'privateFuturesPostPositionLeverageSave'
buy_leverage = leverage
sell_leverage = leverage
        # use safe_value so a missing per-side leverage does not raise a KeyError
        if self.safe_value(params, 'buy_leverage') and self.safe_value(params, 'sell_leverage') and linear:
buy_leverage = params['buy_leverage']
sell_leverage = params['sell_leverage']
elif not leverage:
if linear:
raise ArgumentsRequired(self.id + ' setLeverage() requires either the parameter leverage or params["buy_leverage"] and params["sell_leverage"] for linear contracts')
else:
raise ArgumentsRequired(self.id + ' setLeverage() requires parameter leverage for inverse and futures contracts')
if (buy_leverage < 1) or (buy_leverage > 100) or (sell_leverage < 1) or (sell_leverage > 100):
raise BadRequest(self.id + ' leverage should be between 1 and 100')
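        # Hypothetical calls (symbols illustrative, not part of the original code): inverse and futures
        # contracts take a single value, e.g. await exchange.set_leverage(5, 'BTC/USD'); linear contracts
        # may instead pass per-side values, e.g.
        # await exchange.set_leverage(None, 'BTC/USDT', {'buy_leverage': 5, 'sell_leverage': 5}).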
request = {
'symbol': market['id'],
'leverage_only': True,
}
if not linear:
request['leverage'] = buy_leverage
else:
request['buy_leverage'] = buy_leverage
request['sell_leverage'] = sell_leverage
return await getattr(self, method)(self.extend(request, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = None
if isinstance(api, list):
type = self.safe_string(api, 0)
section = self.safe_string(api, 1)
if type == 'spot':
if section == 'public':
section = 'v1'
else:
section += '/v1'
url = self.implode_hostname(self.urls['api'][type])
request = '/' + type + '/' + section + '/' + path
if (type == 'spot') or (type == 'quote'):
if params:
request += '?' + self.rawencode(params)
elif section == 'public':
if params:
request += '?' + self.rawencode(params)
elif type == 'public':
if params:
request += '?' + self.rawencode(params)
else:
self.check_required_credentials()
timestamp = self.nonce()
query = self.extend(params, {
'api_key': self.apiKey,
'recv_window': self.options['recvWindow'],
'timestamp': timestamp,
})
sortedQuery = self.keysort(query)
auth = self.rawencode(sortedQuery)
signature = self.hmac(self.encode(auth), self.encode(self.secret))
if method == 'POST':
body = self.json(self.extend(query, {
'sign': signature,
}))
headers = {
'Content-Type': 'application/json',
}
else:
request += '?' + self.urlencode(sortedQuery) + '&sign=' + signature
url += request
else:
url = self.implode_hostname(self.urls['api'][api]) + '/' + path
if api == 'public':
if params:
url += '?' + self.rawencode(params)
elif api == 'private':
self.check_required_credentials()
timestamp = self.nonce()
query = self.extend(params, {
'api_key': self.apiKey,
'recv_window': self.options['recvWindow'],
'timestamp': timestamp,
})
sortedQuery = self.keysort(query)
auth = self.rawencode(sortedQuery)
signature = self.hmac(self.encode(auth), self.encode(self.secret))
if method == 'POST':
body = self.json(self.extend(query, {
'sign': signature,
}))
headers = {
'Content-Type': 'application/json',
}
else:
url += '?' + self.urlencode(sortedQuery) + '&sign=' + signature
return {'url': url, 'method': method, 'body': body, 'headers': headers}
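    # Standalone sketch of the private signing flow implemented above, for illustration only:
    # 'MY_KEY', 'MY_SECRET' and the endpoint are placeholders, and urlencode stands in for
    # self.rawencode; the real code path always goes through self.hmac with the account secret.
    #
    #     import hashlib, hmac, time
    #     from urllib.parse import urlencode
    #     params = {'api_key': 'MY_KEY', 'symbol': 'BTCUSD', 'timestamp': int(time.time() * 1000), 'recv_window': 5000}
    #     query = urlencode(sorted(params.items()))  # keys must be sorted before signing
    #     sign = hmac.new(b'MY_SECRET', query.encode(), hashlib.sha256).hexdigest()
    #     url = 'https://api.bybit.com/v2/private/order/list?' + query + '&sign=' + sign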
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return # fallback to default error handler
#
# {
# ret_code: 10001,
# ret_msg: 'ReadMapCB: expect {or n, but found \u0000, error ' +
# 'found in #0 byte of ...||..., bigger context ' +
# '...||...',
# ext_code: '',
# ext_info: '',
# result: null,
# time_now: '1583934106.590436'
# }
#
# {
# "retCode":10001,
# "retMsg":"symbol params err",
# "result":{"symbol":"","bid":"","bidIv":"","bidSize":"","ask":"","askIv":"","askSize":"","lastPrice":"","openInterest":"","indexPrice":"","markPrice":"","markPriceIv":"","change24h":"","high24h":"","low24h":"","volume24h":"","turnover24h":"","totalVolume":"","totalTurnover":"","fundingRate":"","predictedFundingRate":"","nextFundingTime":"","countdownHour":"0","predictedDeliveryPrice":"","underlyingPrice":"","delta":"","gamma":"","vega":"","theta":""}
# }
#
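        # Both response shapes documented above are handled: some endpoints return snake_case
        # 'ret_code' while others return camelCase 'retCode', hence safe_string_2 below; a
        # string value of '0' indicates success.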
errorCode = self.safe_string_2(response, 'ret_code', 'retCode')
if errorCode != '0':
if errorCode == '30084':
# not an error
# https://github.com/ccxt/ccxt/issues/11268
# https://github.com/ccxt/ccxt/pull/11624
# POST https://api.bybit.com/v2/private/position/switch-isolated 200 OK
# {"ret_code":30084,"ret_msg":"Isolated not modified","ext_code":"","ext_info":"","result":null,"time_now":"1642005219.937988","rate_limit_status":73,"rate_limit_reset_ms":1642005219894,"rate_limit":75}
return None
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback) # unknown message
async def fetch_market_leverage_tiers(self, symbol, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
if market['spot']:
                raise BadRequest(self.id + ' fetchMarketLeverageTiers() supports contract markets only')
request['symbol'] = market['id']
type, query = self.handle_market_type_and_params('fetchMarketLeverageTiers', market, params)
method = self.get_supported_mapping(type, {
'linear': 'publicLinearGetRiskLimit', # Symbol required
'swap': 'publicLinearGetRiskLimit',
'inverse': 'v2PublicGetRiskLimitList', # Symbol not required, could implement fetchLeverageTiers
'future': 'v2PublicGetRiskLimitList',
})
response = await getattr(self, method)(self.extend(request, query))
#
# publicLinearGetRiskLimit
# {
# ret_code: '0',
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# id: '11',
# symbol: 'ETHUSDT',
# limit: '800000',
# maintain_margin: '0.01',
# starting_margin: '0.02',
# section: [
# '1', '2', '3',
# '5', '10', '15',
# '25'
# ],
# is_lowest_risk: '1',
# created_at: '2022-02-04 23:30:33.555252',
# updated_at: '2022-02-04 23:30:33.555254',
# max_leverage: '50'
# },
# ...
# ]
# }
#
# v2PublicGetRiskLimitList
# {
# ret_code: '0',
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# id: '180',
# is_lowest_risk: '0',
# section: [
# '1', '2', '3',
# '4', '5', '7',
# '8', '9'
# ],
# symbol: 'ETHUSDH22',
# limit: '30000',
# max_leverage: '9',
# starting_margin: '11',
# maintain_margin: '5.5',
# coin: 'ETH',
# created_at: '2021-04-22T15:00:00Z',
# updated_at: '2021-04-22T15:00:00Z'
# },
# ],
# time_now: '1644017569.683191'
# }
#
result = self.safe_value(response, 'result')
return self.parse_market_leverage_tiers(result, market)
def parse_market_leverage_tiers(self, info, market):
#
# Linear
# [
# {
# id: '11',
# symbol: 'ETHUSDT',
# limit: '800000',
# maintain_margin: '0.01',
# starting_margin: '0.02',
# section: [
# '1', '2', '3',
# '5', '10', '15',
# '25'
# ],
# is_lowest_risk: '1',
# created_at: '2022-02-04 23:30:33.555252',
# updated_at: '2022-02-04 23:30:33.555254',
# max_leverage: '50'
# },
# ...
# ]
#
# Inverse
# [
# {
# id: '180',
# is_lowest_risk: '0',
# section: [
# '1', '2', '3',
# '4', '5', '7',
# '8', '9'
# ],
# symbol: 'ETHUSDH22',
# limit: '30000',
# max_leverage: '9',
# starting_margin: '11',
# maintain_margin: '5.5',
# coin: 'ETH',
# created_at: '2021-04-22T15:00:00Z',
# updated_at: '2021-04-22T15:00:00Z'
# }
# ...
# ]
#
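        # Illustrative walk-through of the loop below, using the linear example above: the row with
        # limit '800000' becomes tier 1 with notionalFloor 0 and notionalCap 800000; the next row's
        # tier then starts at notionalFloor 800000, and so on down the risk-limit list.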
notionalFloor = 0
tiers = []
for i in range(0, len(info)):
item = info[i]
notionalCap = self.safe_number(item, 'limit')
tiers.append({
'tier': self.sum(i, 1),
'currency': market['base'],
'notionalFloor': notionalFloor,
'notionalCap': notionalCap,
'maintenanceMarginRate': self.safe_number(item, 'maintain_margin'),
'maxLeverage': self.safe_number(item, 'max_leverage'),
'info': item,
})
notionalFloor = notionalCap
return tiers
| [
"[email protected]"
] | |
bb1016bce463941647fcdb119e9254f4af8aff17 | a867b1c9da10a93136550c767c45e0d8c98f5675 | /D3_LC_74_Search_a_2D_Matrix.py | 08d04637b85bca5f33d4a675c5a8fa58d6c01547 | [] | no_license | Omkar02/FAANG | f747aacc938bf747129b8ff35b6648fb265d95b6 | ee9b245aa83ea58aa67954ab96442561dbe68d06 | refs/heads/master | 2023-03-25T19:45:08.153403 | 2021-03-28T07:13:08 | 2021-03-28T07:13:08 | 280,783,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,866 | py | # import __main__ as main
# from Helper.TimerLogger import CodeTimeLogging
# fileName = main.__file__
# fileName = fileName.split('\\')[-1]
# CodeTimeLogging(Flag='F', filename=fileName, Tag='Matrix', Difficult='Medium')
"""Integers in each row are sorted from left to right."""
def searchMatrix(matrix, target):
if not matrix:
return False
r, c = 0, len(matrix[0]) - 1
cnt = 0
while r < len(matrix) and c >= 0:
print(matrix[r][c])
if target < matrix[r][c]:
c -= 1
if target > matrix[r][c]:
r += 1
else:
return True
if cnt == 10:
return False
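# A hypothetical alternative, not in the original file: when the full LeetCode 74 guarantee holds
# (each row starts after the previous row ends), the matrix can be read as one sorted list and
# binary-searched in O(log(rows * cols)) instead of O(rows + cols).
def searchMatrixBinary(matrix, target):
    if not matrix or not matrix[0]:
        return False
    rows, cols = len(matrix), len(matrix[0])
    lo, hi = 0, rows * cols - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = matrix[mid // cols][mid % cols]  # map the flat index back to (row, col)
        if value == target:
            return True
        if value < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return False
# e.g. searchMatrixBinary(mat, 271) returns True for the test matrix defined below.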
# mat = [[1, 3, 5, 7],
# [10, 11, 16, 20],
# [23, 30, 34, 50]]
# target = 30
mat = [[-8, -6, -5, -4, -2, -1, -1, 0, 2, 4, 5, 7, 7, 7, 7, 9, 9, 9, 9, 11],
[12, 14, 15, 16, 18, 20, 20, 20, 21, 21, 22, 23, 23, 25, 25, 25, 26, 27, 29, 30],
[31, 31, 32, 32, 33, 35, 37, 39, 39, 39, 40, 41, 43, 44, 46, 48, 48, 48, 48, 50],
[52, 54, 55, 57, 57, 58, 58, 60, 62, 64, 65, 65, 65, 67, 69, 71, 71, 73, 74, 74],
[75, 76, 78, 78, 80, 82, 82, 82, 84, 85, 85, 87, 87, 89, 90, 90, 91, 93, 93, 94],
[96, 98, 100, 102, 104, 105, 107, 109, 111, 113, 113, 115, 115, 117, 119, 119, 120, 122, 122, 124],
[126, 127, 128, 130, 130, 130, 130, 132, 132, 133, 134, 136, 137, 138, 140, 141, 141, 143, 144, 146],
[148, 150, 151, 152, 154, 156, 157, 158, 159, 161, 161, 162, 162, 164, 164, 165, 167, 168, 169, 169],
[171, 173, 173, 175, 176, 178, 179, 181, 182, 183, 184, 184, 184, 185, 186, 186, 186, 186, 187, 189],
[190, 192, 192, 193, 195, 196, 197, 197, 198, 198, 198, 198, 198, 199, 201, 203, 204, 206, 208, 208],
[209, 210, 211, 212, 212, 212, 214, 214, 216, 217, 218, 218, 219, 221, 222, 224, 225, 227, 229, 229],
[230, 230, 230, 231, 233, 233, 234, 235, 237, 237, 238, 238, 240, 240, 242, 242, 244, 246, 246, 247],
[249, 251, 252, 252, 254, 254, 256, 256, 257, 258, 259, 260, 260, 261, 263, 265, 266, 267, 267, 269],
[271, 273, 273, 274, 274, 274, 276, 276, 276, 278, 279, 280, 280, 280, 282, 284, 284, 286, 286, 287],
[289, 290, 290, 291, 293, 293, 293, 293, 295, 296, 296, 297, 298, 299, 299, 301, 302, 304, 306, 308],
[309, 310, 311, 311, 312, 312, 314, 315, 317, 319, 320, 322, 323, 324, 324, 324, 326, 328, 329, 331],
[332, 334, 335, 337, 337, 339, 341, 343, 345, 347, 348, 348, 348, 348, 348, 350, 350, 350, 351, 352],
[353, 355, 355, 356, 357, 358, 360, 361, 361, 361, 362, 364, 364, 364, 365, 366, 368, 370, 370, 372],
[374, 376, 378, 380, 382, 382, 383, 384, 385, 385, 387, 388, 388, 390, 392, 394, 394, 396, 397, 399],
[400, 402, 403, 403, 405, 405, 407, 409, 411, 411, 413, 414, 415, 417, 418, 419, 419, 419, 421, 422]]
target = 271
print(searchMatrix(mat, target))
| [
"[email protected]"
] | |
61297baec7f9634e81732d93964cde44437a232f | 1c790b0adc648ff466913cf4aed28ace905357ff | /applications/vision/data/mnist/__init__.py | e4bdf2b45dfba9e56aea7e90fcee004bb9fd170b | [
"Apache-2.0"
] | permissive | LLNL/lbann | 04d5fdf443d6b467be4fa91446d40b620eade765 | e8cf85eed2acbd3383892bf7cb2d88b44c194f4f | refs/heads/develop | 2023-08-23T18:59:29.075981 | 2023-08-22T22:16:48 | 2023-08-22T22:16:48 | 58,576,874 | 225 | 87 | NOASSERTION | 2023-09-11T22:43:32 | 2016-05-11T20:04:20 | C++ | UTF-8 | Python | false | false | 2,472 | py | import gzip
import os
import os.path
import urllib.request
import google.protobuf.text_format
import lbann
# Paths
data_dir = os.path.dirname(os.path.realpath(__file__))
def download_data():
"""Download MNIST data files, if needed.
Data files are downloaded from http://yann.lecun.com/exdb/mnist/
and uncompressed. Does nothing if the files already exist.
"""
# MNIST data files and associated URLs
urls = {
'train-images-idx3-ubyte': 'https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz',
'train-labels-idx1-ubyte': 'https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte': 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte': 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz',
}
# Download and uncompress MNIST data files, if needed
for data_file, url in urls.items():
data_file = os.path.join(data_dir, data_file)
compressed_file = data_file + '.gz'
if not os.path.isfile(data_file):
request = urllib.request.Request(
url,
headers={'User-Agent': 'LBANN/vision-app'},
)
with urllib.request.urlopen(request) as response, \
open(compressed_file, 'wb') as out_file:
out_file.write(response.read())
with gzip.open(compressed_file, 'rb') as in_file, \
open(data_file, 'wb') as out_file:
out_file.write(in_file.read())
def make_data_reader(validation_fraction=0.1):
"""Make Protobuf message for MNIST data reader.
MNIST data is downloaded if needed.
Args:
        validation_fraction (float): The fraction of training samples to hold
            out as the validation dataset.
"""
# Download MNIST data files
download_data()
# Load Protobuf message from file
protobuf_file = os.path.join(data_dir, 'data_reader.prototext')
message = lbann.lbann_pb2.LbannPB()
with open(protobuf_file, 'r') as f:
google.protobuf.text_format.Merge(f.read(), message)
message = message.data_reader
if validation_fraction is not None:
assert message.reader[0].role == "train"
message.reader[0].validation_fraction = validation_fraction
# Set paths
for reader in message.reader:
reader.data_filedir = data_dir
return message
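# Minimal usage sketch (an assumption, not part of the original module): running the file directly
# downloads MNIST if needed and prints the generated data-reader prototext. It requires the LBANN
# Python front-end (the `lbann` package imported above) to be installed.
if __name__ == '__main__':
    message = make_data_reader(validation_fraction=0.1)
    print(message)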
| [
"[email protected]"
] |