blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 5–283) | content_id (stringlengths 40–40) | detected_licenses (sequencelengths 0–41) | license_type (stringclasses 2 values) | repo_name (stringlengths 7–96) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringclasses 58 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 12.7k–662M, nullable) | star_events_count (int64 0–35.5k) | fork_events_count (int64 0–20.6k) | gha_license_id (stringclasses 11 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses 43 values) | src_encoding (stringclasses 9 values) | language (stringclasses 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64 7–5.88M) | extension (stringclasses 30 values) | content (stringlengths 7–5.88M) | authors (sequencelengths 1–1) | author (stringlengths 0–73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3f23b2f1a0f51f385862ff8d5a5ea6f26947847e | b4cd6acd822dc2fbc97908aafc910e60bf597756 | /web_s/env/lib/python3.7/keyword.py | c829f53b9c895483ad9f7c5c7ba0a5b6f6bf08e7 | [] | no_license | vaibhavCodian/Stock-Prediction-Web-App | 868685786c43155ae4abcf7dd6c4590802faa168 | 54ca117150c71a2a017c0ba4b8d91324a7645a8b | refs/heads/master | 2021-02-12T14:06:53.216536 | 2020-04-28T17:50:24 | 2020-04-28T17:50:24 | 244,597,359 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | /home/vaibhav/anaconda3/lib/python3.7/keyword.py | [
"[email protected]"
] | |
f427c925290c5a2a81db95be3c0f18e6c3e33066 | dccd1058e723b6617148824dc0243dbec4c9bd48 | /atcoder/abc048/a.py | 2a32b441150b9a7e79505fe4330cbbf200516869 | [] | no_license | imulan/procon | 488e49de3bcbab36c624290cf9e370abfc8735bf | 2a86f47614fe0c34e403ffb35108705522785092 | refs/heads/master | 2021-05-22T09:24:19.691191 | 2021-01-02T14:27:13 | 2021-01-02T14:27:13 | 46,834,567 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | for s in input().split():
print(s[0],end="")
print()
| [
"[email protected]"
] | |
7c1cf7586a7cc9ff5c8a7ecd4890b9115290f894 | f578bf168e4f8df91007bae7a2352a31cd98d375 | /CraftProtocol/Protocol/v1_8/Packet/Play/ConfirmTransactionClientPacket.py | 784d4f7628593a4131223067b1b3c32efe08486b | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Toranktto/CraftProtocol | 97a4e4f408e210494f6acbec0f30c477bb55f8fa | a6f4a67756c3868820ab76df5e148d76b020d990 | refs/heads/master | 2021-07-18T11:04:13.432733 | 2018-09-09T17:23:51 | 2018-09-09T17:23:51 | 144,491,218 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | #!/usr/bin/env python
from CraftProtocol.Protocol.Packet.BasePacket import BasePacket
from CraftProtocol.Protocol.Packet.PacketDirection import PacketDirection
from CraftProtocol.StreamIO import StreamIO
class ConfirmTransactionClientPacket(BasePacket):
PACKET_ID = 0x32
PACKET_DIRECTION = PacketDirection.CLIENTBOUND
def __init__(self, window_id, transaction_id, accepted):
BasePacket.__init__(self)
self._window_id = int(window_id)
self._transaction_id = int(transaction_id)
self._accepted = bool(accepted)
def get_window_id(self):
return self._window_id
def set_window_id(self, window_id):
self._window_id = int(window_id)
def get_transaction_id(self):
return self._transaction_id
def set_transaction_id(self, transaction_id):
self._transaction_id = int(transaction_id)
def is_accepted(self):
return self._accepted
def set_accepted(self, accepted):
self._accepted = bool(accepted)
@staticmethod
def write(stream, packet):
StreamIO.write_byte(stream, packet.get_window_id())
StreamIO.write_short(stream, packet.get_transaction_id())
StreamIO.write_bool(stream, packet.is_accepted())
@staticmethod
def read(stream, packet_size):
window_id = StreamIO.read_byte(stream)
transaction_id = StreamIO.read_short(stream)
accepted = StreamIO.read_bool(stream)
return ConfirmTransactionClientPacket(window_id, transaction_id, accepted)
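# --- Illustrative round trip (an added sketch, not part of the original
# file). It assumes StreamIO reads from / writes to any binary file-like
# object such as io.BytesIO; the field values are arbitrary.
if __name__ == "__main__":
    import io

    buf = io.BytesIO()
    packet = ConfirmTransactionClientPacket(window_id=0, transaction_id=1, accepted=True)
    ConfirmTransactionClientPacket.write(buf, packet)
    buf.seek(0)
    decoded = ConfirmTransactionClientPacket.read(buf, packet_size=buf.getbuffer().nbytes)
    assert decoded.get_window_id() == 0
    assert decoded.get_transaction_id() == 1
    assert decoded.is_accepted()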
| [
"[email protected]"
] | |
f1ea3f92b383d226f7bc949e68085f65407309e1 | 97fd76dd9f2fd29a6418e26b8f8d21f330b58a9c | /tests/test_main.py | 2025733d3b9de2147a4a03c1f3d06c4a418c6312 | [
"MIT"
] | permissive | sudeep611/nepserate | d53d39ca4c204508621379787ac8d4cbb40a68d3 | b7247839ed1675eeaecf6cac4124507a35f6c8d3 | refs/heads/master | 2021-05-29T15:35:24.690452 | 2014-10-31T16:56:06 | 2014-10-31T16:56:06 | 25,625,531 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | # Test Script
# If this test pass then everything should work fine
from nepserate import ScrapeRate
import unittest
class TestScrapeRate(unittest.TestCase):
def test_result(self):
ns = ScrapeRate()
# Check if the return type is list
self.assertEqual(type(ns.getRate("ADBL")), type([]))
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
22cd4aa937ae8cfd23745a3259f156cd50b64a4e | cb3583cc1322d38b1ee05cb1c081e0867ddb2220 | /donor/migrations/0014_auto_20210331_0404.py | b1189bdce3ff86f5f1436a2a55ec393aa74d80f9 | [
"MIT"
] | permissive | iamgaddiel/codeupblood | 9e897ff23dedf5299cb59fd6c44d9bd8a645e9c6 | a0aa1725e5776d80e083b6d4e9e67476bb97e983 | refs/heads/main | 2023-05-07T23:34:27.475043 | 2021-04-24T20:49:08 | 2021-04-24T20:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # Generated by Django 3.1.6 on 2021-03-31 11:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('donor', '0013_auto_20210330_0743'),
]
operations = [
migrations.AlterField(
model_name='appointment',
name='d_id',
field=models.CharField(default='oiapGX', max_length=50),
),
]
| [
"[email protected]"
] | |
fce283892ba59dcf2ba42e224830b42612d88aa5 | ec3e9925af8742d578fd11aac6f000ced71aa9f5 | /crm_app/migrations/0001_initial.py | a8d2064e20aeff0443aad84487887d739acbfa32 | [] | no_license | amrit-kumar/CRM-Customer-relationship-management- | cfd3ec42a975e7b987d76abe465cb2ec9eec62b4 | d41b482166557e17825b2a010d24bb03ee469245 | refs/heads/master | 2021-06-25T06:37:51.721771 | 2017-08-12T09:43:23 | 2017-08-12T09:43:23 | 96,964,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-01-17 10:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MsgReports',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('request_id', models.CharField(blank=True, max_length=250, null=True)),
('user_id', models.CharField(blank=True, max_length=250, null=True)),
('date', models.DateTimeField(blank=True, null=True)),
('discription', models.CharField(blank=True, max_length=250, null=True)),
('number', models.BigIntegerField(blank=True, null=True)),
('sender_id', models.CharField(blank=True, max_length=250, null=True)),
('campaign_name', models.CharField(blank=True, max_length=250, null=True)),
('status', models.CharField(blank=True, choices=[('1', '1'), ('2', '2'), ('3', '3')], max_length=250, null=True)),
],
),
]
| [
"[email protected]"
] | |
c833bef47a138873f53849c9dffd03c4f3bb8c82 | f54e711cb7fa9ec0295d1e5519fde39778299c48 | /blogProject/myApp/migrations/0004_comment_name.py | 0e5793a15f0e9b585a0ce290649d05cbf9f3c64e | [] | no_license | Sushma-RV99/blog-repo | bb9c795784fd82178384ede75ef369d64997fa1a | a1443c963fdcaaf38904b3f6faa90401a396564e | refs/heads/master | 2023-02-11T06:07:48.709194 | 2021-01-08T14:23:36 | 2021-01-08T14:23:36 | 327,924,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # Generated by Django 2.2 on 2021-01-01 15:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myApp', '0003_comment'),
]
operations = [
migrations.AddField(
model_name='comment',
name='name',
field=models.CharField(default=1, max_length=50),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
0ff0703817449a164cc4148e5e772d7aad82761d | 20a0bd0a9675f52d4cbd100ee52f0f639fb552ef | /transit_odp/data_quality/migrations/0010_auto_20191118_1604.py | 1dbd2499c70b6991917a996f3979d7d53de8b877 | [] | no_license | yx20och/bods | 2f7d70057ee9f21565df106ef28dc2c4687dfdc9 | 4e147829500a85dd1822e94a375f24e304f67a98 | refs/heads/main | 2023-08-02T21:23:06.066134 | 2021-10-06T16:49:43 | 2021-10-06T16:49:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | # Generated by Django 2.2.7 on 2019-11-18 16:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("data_quality", "0009_auto_20191118_1029"),
]
operations = [
migrations.RemoveField(
model_name="service",
name="report",
),
migrations.AddField(
model_name="service",
name="ito_id",
field=models.TextField(default=None, unique=True),
preserve_default=False,
),
migrations.AddField(
model_name="service",
name="reports",
field=models.ManyToManyField(
related_name="services", to="data_quality.DataQualityReport"
),
),
migrations.AddField(
model_name="servicelink",
name="ito_id",
field=models.TextField(default=None, unique=True),
preserve_default=False,
),
migrations.AddField(
model_name="servicepattern",
name="ito_id",
field=models.TextField(default=None, unique=True),
preserve_default=False,
),
migrations.AddField(
model_name="timingpattern",
name="ito_id",
field=models.TextField(default=None, unique=True),
preserve_default=False,
),
migrations.AddField(
model_name="vehiclejourney",
name="ito_id",
field=models.TextField(default=None, unique=True),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
c941709fbed0b9fa452dac0e4e3ea4916d99de51 | 3b630e8ffae16049b09ea90b3d4af4e2c7b9483b | /firstphy.py | 35ea0b20e4778b407114c119c477c625d43f2d8e | [] | no_license | shafifx/myhub | fe91a2d46c0ba7f7d58057e1d05aecc067989fc9 | a3939fe4743a80535af1334f1f7fc78f28482745 | refs/heads/main | 2023-06-06T22:34:09.271540 | 2021-07-08T16:17:53 | 2021-07-08T16:17:53 | 383,184,433 | 0 | 0 | null | 2021-07-08T16:17:53 | 2021-07-05T15:21:38 | Python | UTF-8 | Python | false | false | 43 | py | hry pythonhttps://github.com/shafifx/myhub
| [
"[email protected]"
] | |
33eadd24b48302614418717e1f4b4966a2618001 | 5b6ec20f6a62e2daf46e13c8740e9d8c8f4ff0e2 | /mvloader/nrrd.py | 68430da6cbbd010a39c01834606f84bae565ca30 | [
"MIT"
] | permissive | dichetao/mvloader | 946575ee2cad2daa2d4ae507ba44bf120e100966 | 4244ba30f4c8f92ccf7605dc0134ef32706a70a2 | refs/heads/master | 2021-09-23T14:23:26.151457 | 2018-09-24T14:11:24 | 2018-09-24T14:11:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,901 | py | #!/usr/bin/env python
# coding: utf-8
"""A module for reading NRRD files [NRRD1]_, basically a wrapper for calls on the pynrrd library [NRRD2]_.
References
----------
.. [NRRD1] http://teem.sourceforge.net/nrrd/format.html (20180212)
.. [NRRD2] https://github.com/mhe/pynrrd (20180212).
"""
import nrrd
import numpy as np
from mvloader.volume import Volume
def open_image(path, verbose=True):
"""
Open a 3D NRRD image at the given path.
Parameters
----------
path : str
The path of the file to be loaded.
verbose : bool, optional
If `True` (default), print some meta data of the loaded file to standard output.
Returns
-------
Volume
The resulting 3D image volume, with the ``src_object`` attribute set to the tuple `(data, header)` returned
by pynrrd's ``nrrd.read`` (where `data` is a Numpy array and `header` is a dictionary) and the desired
anatomical world coordinate system ``system`` set to "RAS".
Raises
------
IOError
If something goes wrong.
"""
try:
src_object = (voxel_data, hdr) = nrrd.read(path)
except Exception as e:
raise IOError(e)
if verbose:
print("Loading image:", path)
print("Meta data:")
for k in sorted(hdr.keys(), key=str.lower):
print("{}: {!r}".format(k, hdr[k]))
__check_data_kinds_in(hdr)
src_system = __world_coordinate_system_from(hdr) # No fixed world coordinates for NRRD images!
mat = __matrix_from(hdr) # Voxels to world coordinates
# Create new ``Volume`` instance
volume = Volume(src_voxel_data=voxel_data, src_transformation=mat, src_system=src_system, system="RAS",
src_object=src_object)
return volume
def save_image(path, data, transformation, system="RAS", kinds=None):
"""
Save the given image data as a NRRD image file at the given path.
Parameters
----------
path : str
The path for the file to be saved.
data : array_like
Three-dimensional array that contains the voxels to be saved.
transformation : array_like
:math:`4×4` transformation matrix that maps from ``data``'s voxel indices to the given ``system``'s anatomical
world coordinate system.
system : str, optional
The world coordinate system to which ``transformation`` maps the voxel data. Either "RAS" (default), "LAS", or
"LPS" (these are the ones supported by the NRRD format).
kinds : str or sequence of strings, optional
If given, the string(s) will be used to set the NRRD header's "kinds" field. If a single string is given, it
will be used for all dimensions. If multiple strings are given, they will be used in the given order. If
nothing is given (default), the "kinds" field will not be set. Note that all strings should either be "domain"
or "space".
"""
if data.ndim > 3:
raise RuntimeError("Currently, mvloader supports saving NRRD files with scalar data only!")
# Create the header entries from the transformation
space = system.upper()
space_directions = transformation[:3, :3].T.tolist()
space_origin = transformation[:3, 3].tolist()
options = {"space": space, "space directions": space_directions, "space origin": space_origin}
if kinds is not None:
kinds = (data.ndim * [kinds]) if isinstance(kinds, str) else list(kinds)
options["kinds"] = kinds
nrrd.write(filename=path, data=data, options=options)
def save_volume(path, volume, src_order=True, src_system=True, kinds=None):
"""
Save the given ``Volume`` instance as a NRRD image file at the given path.
Parameters
----------
path : str
The path for the file to be saved.
volume : Volume
The ``Volume`` instance containing the image data to be saved.
src_order : bool, optional
If `True` (default), order the saved voxels as in ``src_data``; if `False`, order the saved voxels as in
``aligned_data``. In any case, the correct transformation matrix will be chosen.
src_system : bool, optional
If `True` (default), try to use ``volume``'s ``src_system`` as the anatomical world coordinate system for
saving; if `False`, try to use ``volume``'s ``system`` instead. In either case, this works if the system is
either "RAS", "LAS", or "LPS" (these are the ones supported by the NRRD format). If a different system is
given, use "RAS" instead.
kinds : str or sequence of strings, optional
If given, the string(s) will be used to set the NRRD header's "kinds" field. If a single string is given, it
will be used for all dimensions. If multiple strings are given, they will be used in the given order. If
nothing is given (default), the "kinds" field will not be set. Note that all strings should either be "domain"
or "space".
"""
if volume.aligned_data.ndim > 3:
raise RuntimeError("Currently, mvloader supports saving NRRD files with scalar data only!")
system = volume.src_system if src_system else volume.system
system = system if system in ["RAS", "LAS", "LPS"] else "RAS"
if src_order:
data = volume.src_data
transformation = volume.get_src_transformation(system)
else:
data = volume.aligned_data
transformation = volume.get_aligned_transformation(system)
save_image(path, data=data, transformation=transformation, system=system, kinds=kinds)
def __check_data_kinds_in(header):
"""
Sanity check on the header's "kinds" field: are all entries either "domain" or "space" (i.e. are we really dealing
with scalar data on a spatial domain)?
Parameters
----------
header : dict
A dictionary containing the NRRD header (as returned by ``nrrd.read``, for example).
Returns
-------
None
Simply return if everything is ok or the "kinds" field is not set.
Raises
------
IOError
If the "kinds" field contains entries other than "domain" or "space".
"""
kinds = header.get("kinds")
if kinds is None:
return
for k in kinds:
if k.lower() not in ["domain", "space"]:
raise IOError("At least one data dimension contains non-spatial data!")
def __world_coordinate_system_from(header):
"""
From the given NRRD header, determine the respective assumed anatomical world coordinate system.
Parameters
----------
header : dict
A dictionary containing the NRRD header (as returned by ``nrrd.read``, for example).
Returns
-------
str
The three-character uppercase string determining the respective anatomical world coordinate system (such as
"RAS" or "LPS").
Raises
------
IOError
If the header is missing the "space" field or the "space" field's value does not determine an anatomical world
coordinate system.
"""
try:
system_str = header["space"]
except KeyError as e:
raise IOError("Need the header's \"space\" field to determine the image's anatomical coordinate system.")
if len(system_str) == 3:
# We are lucky: this is already the format that we need
return system_str.upper()
# We need to separate the string (such as "right-anterior-superior") at its dashes, then get the first character
# of each component. We cannot handle 4D data nor data with scanner-based coordinates ("scanner-...") or
# non-anatomical coordinates ("3D-...")
system_components = system_str.split("-")
if len(system_components) == 3 and not system_components[0].lower() in ["scanner", "3d"]:
system_str = "".join(c[0].upper() for c in system_components)
return system_str
raise IOError("Cannot handle \"space\" value {}".format(system_str))
def __matrix_from(header):
"""
Calculate the transformation matrix from voxel coordinates to the header's anatomical world coordinate system.
Parameters
----------
header : dict
A dictionary containing the NRRD header (as returned by ``nrrd.read``, for example).
Returns
-------
numpy.ndarray
The resulting :math:`4×4` transformation matrix.
"""
try:
space_directions = header["space directions"]
space_origin = header["space origin"]
except KeyError as e:
raise IOError("Need the header's \"{}\" field to determine the mapping from voxels to world coordinates.".format(e))
# "... the space directions field gives, one column at a time, the mapping from image space to world space
# coordinates ... [1]_" -> list of columns, needs to be transposed
trans_3x3 = np.array(space_directions).T
trans_4x4 = np.eye(4)
trans_4x4[:3, :3] = trans_3x3
trans_4x4[:3, 3] = space_origin
return trans_4x4
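# --- Usage sketch (illustration only, not part of the original module).
# The .nrrd paths below are hypothetical; requires pynrrd and mvloader.
if __name__ == "__main__":
    vol = open_image("/tmp/example.nrrd", verbose=False)
    print(vol.aligned_data.shape)  # voxel data reordered to the "RAS" system
    save_volume("/tmp/example_copy.nrrd", vol, kinds="domain")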
| [
"[email protected]"
] | |
22eee1e5ce8c1bd1a482bcb953ffebe1d366e5d7 | 1c73ef51b70251ed6ed24ce7a9ea08e601b602dd | /insert_mysql.py | 2672456d4f9ab5af82e2e0d026f684b8fa32ba81 | [] | no_license | java2man/restful-client | 21487f0a8361c499277d6863e86d0fdf4060ff46 | d1cc2a3b3995214d9c71ad50a5149f145fd3063e | refs/heads/master | 2021-01-22T04:11:07.756990 | 2017-02-10T01:50:50 | 2017-02-10T01:50:50 | 81,513,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,067 | py | # -*- coding: utf-8 -*-
import sys
import mysql.connector
from mysql.connector import conversion
class DBOperator:
def __init__(self, user, password, host, database):
self.conn = mysql.connector.connect(user=user, password=password, host=host, database=database)
self.cur = self.conn.cursor()
def myprint(self, s):
sys.stdout.buffer.write(s.encode('cp932', errors='replace'))
def createTable(self, table_name, json):
sql = "create table IF NOT EXISTS " + table_name + "("
keys = json.keys()
for key in keys:
if(key == 'links'):
continue
if(key == 'group'):
key = '_group'
if(key == '_id'):
sql = sql + key + " INT NOT NULL PRIMARY KEY,"
else:
sql = sql + key + " " + "TEXT,"
sql = sql[:-1] + ")"
#self.myprint(sql)
self.cur.execute(sql)
self.conn.commit()
		# keep the shared cursor/connection open for subsequent calls
def insertTable(self, table_name, json):
sql_insert = "insert ignore into " + table_name + "("
sql_values = "values("
keys = json.keys()
for key in keys:
value = str(json[key])
if(key == 'links'):
continue
if(key == 'group'):
key = '_group'
sql_insert = sql_insert + key + ","
sql_values = sql_values + "'" + (value.replace("'", "''")).replace("\\", "\\\\") + "',"
sql = sql_insert[:-1] + ") " + sql_values[:-1] + ")"
#self.myprint(sql)
self.addColumnIfNeed(table_name, sql)
#self.cur.execute(sql)
self.conn.commit()
		# keep the shared cursor/connection open for subsequent calls
def alterTable(self, table_name, column_name):
sql_alter = "ALTER TABLE " + table_name + " ADD COLUMN " + column_name + " TEXT"
self.cur.execute(sql_alter)
self.conn.commit()
def addColumnIfNeed(self, table_name, sql):
try:
self.cur.execute(sql)
except mysql.connector.ProgrammingError as e:
str1 = "Unknown column '"
str2 = "' in 'field list'"
field = ''
if(str1 in str(e) and str2 in str(e)):
index1 = str(e).index(str1) + len(str1)
field = str(e)[index1:len(str(e)) - len(str2)]
print(field)
self.alterTable(table_name, field)
self.addColumnIfNeed(table_name, sql) | [
"[email protected]"
] | |
b933acdeb2309ba593e6d9e9d9d667aff904c210 | 520c5877c4f7e33b66a955bde8eb0b613b99666a | /lyric_generation/embedding.py | 4970fcff859b9802952e2bb645b2b409cfea1c74 | [] | no_license | richardsavery/interactive-hiphop | 53db132369bb354c626d5a28635d1bba857a12d8 | 41bb1b7a7eb2a6bc1eb33a7f4cdf640e4cda7ff1 | refs/heads/master | 2022-02-23T20:30:52.354441 | 2022-02-10T03:58:17 | 2022-02-10T03:58:17 | 206,121,157 | 3 | 2 | null | 2022-02-11T03:00:55 | 2019-09-03T16:21:45 | Python | UTF-8 | Python | false | false | 829 | py | from gensim.test.utils import datapath, get_tmpfile
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
import pickle
# GLOVE_FILE = "/Users/brianmodel/Desktop/gatech/VIP/interactive-hiphop/lyric_generation/glove.840B.300d.txt"
WORD2VEC_FILE = "/Users/brianmodel/Desktop/gatech/VIP/interactive-hiphop/lyric_generation/GoogleNews-vectors-negative300.bin"
def glove_to_word2vec():
glove_file = datapath(GLOVE_FILE)
tmp_file = get_tmpfile(WORD2VEC_FILE)
_ = glove2word2vec(glove_file, tmp_file)
# model = KeyedVectors.load_word2vec_format(tmp_file)
def get_embedding():
return KeyedVectors.load_word2vec_format(WORD2VEC_FILE, binary=True)
model = get_embedding()
with open('word2vec.model', 'wb') as model_file:
pickle.dump(model, model_file)
print(model)
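# --- Illustration only (assumes the Google News vectors loaded above and
# that the query words are in the vocabulary): standard gensim
# KeyedVectors queries against the embedding.
print(model.most_similar("music", topn=5))
print(model.similarity("king", "queen"))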
| [
"[email protected]"
] | |
3979ad9eea86bdb41c9f35d9812b87941eb06226 | 5ed4a4dc164791157d089568a6a256372262f6d4 | /7.9.aux_NM.py | 0f4e42b0ac10c712a35ad01de06f2b6434811b62 | [] | no_license | Valkyries12/algoritmos-programacion1 | 72cf6c2146ff8822b1ff8aa8bf6d7c4c677fc55e | 20a5861c85e2841fdba9574c4b08cec6b2200b15 | refs/heads/master | 2020-07-02T08:16:53.670119 | 2019-08-18T01:59:09 | 2019-08-18T01:59:09 | 201,470,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | #Ejercicio 7.9. Escribir una función que reciba como parámetro una cadena de palabras separadas por espacios y devuelva, como resultado, cuántas palabras de más de cinco letras tiene la cadena dada.
def cantidad_mas_de_cinco(cadena):
""" devuelve cuantas palabras tienen mas de 5 letras """
lista = cadena.split()
cantidad = 0
for elemento in lista:
if len(elemento) > 5:
cantidad += 1
return cantidad
print(cantidad_mas_de_cinco("Habia una vez un barco chiquitito que nadaba sin atencion")) | [
"[email protected]"
] | |
78f31a9c174255d188697506e1941c866f62891c | 8f949493064b77dd3f19ceeed1e86382ace176d6 | /posts/urls.py | 3f113ad6817989d01a71ca2970489a00507bc58f | [] | no_license | sudhanshu-jha/simplesocial | 44a19a1b1051dcc8577de5d87660a5b890b829d1 | 6d40293be75703d5498025150acf9e91bae6f77c | refs/heads/master | 2020-04-17T07:41:54.207867 | 2019-01-18T10:24:14 | 2019-01-18T10:24:14 | 135,698,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | from django.conf.urls import url
from . import views
app_name = "posts"
urlpatterns = [
url(r"^$", views.PostList.as_view(), name="all"),
url(r"new/$", views.CreatePost.as_view(), name="create"),
url(r"by/(?P<username>[-\w]+)/$", views.UserPosts.as_view(), name="for_user"),
url(
r"by/(?P<username>[-\w]+)/(?P<pk>\d+)/$",
views.PostDetail.as_view(),
name="single",
),
url(r"delete/(?P<pk>\d+)/$", views.DeletePost.as_view(), name="delete"),
]
| [
"[email protected]"
] | |
d1a50b99473a4235042bb673ae4d5648722d7914 | 720dcd12b8fb7ab26125317a6f3d00c2623e5f13 | /chatbotQuery/__init__.py | fe8fcde48e539b7f3222f7e172a5b2d88236c54b | [
"MIT"
] | permissive | tgquintela/chatbot_query | 78e6f21268e06572009295c271c277ef89f2dcbc | 4c5160992a444f828da019ae57a802467a13c2fa | refs/heads/master | 2021-01-01T18:00:46.261089 | 2017-10-13T18:03:32 | 2017-10-13T18:03:32 | 98,224,976 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,896 | py |
"""
TODO
----
Decorator for message collections
"""
import copy
class ChatbotMessage(dict):
"""
Compulsary elements
-------------------
- message
- collection
- from [user, bot]
"""
def __init__(self, message):
self.update({'message': '', 'collection': False})
self.update(message)
assert('from' in self)
assert('message' in self)
assert('collection' in self)
@classmethod
def from_message(cls, message):
if isinstance(message, ChatbotMessage):
return message
return cls(message)
@classmethod
def from_candidates_messages(cls, message):
message.update({'from': 'bot'})
if type(message['message']) == str:
message['collection'] = False
elif type(message['message']) == list:
message['collection'] = True
return cls(message)
@classmethod
def fake_user_message(cls):
return cls({'from': 'user'})
@property
def last_message_text(self):
if self['collection']:
return self['message'][-1]['message']
else:
return self['message']
def get_last_post(self):
_, last_post = self._filter_message_2_post()
for p in last_post:
yield p
def get_post(self):
posts, _ = self._filter_message_2_post()
for p in posts:
yield p
def get_all_messages(self):
for p in self.get_post():
yield p
for p in self.get_last_post():
yield p
def format_message(self, format_information):
if self['collection']:
self['message'][-1]['message'] =\
self['message'][-1]['message'].format(**format_information)
else:
self['message'] = self['message'].format(**format_information)
return self
def reflect_message(self, pre_message):
for key in pre_message:
if key not in ['message', 'from', 'time', 'answer_status',
'sending_status', 'collection', 'posting_status']:
self[key] = pre_message[key]
return self
def reflect_metadata(self, pre_message):
for key in pre_message:
if key not in self:
if key not in ['message', 'from', 'time', 'answer_status',
'sending_status', 'collection']:
self[key] = pre_message[key]
return self
def keep_query(self, pre_message):
if 'query' in pre_message:
if 'query' in self:
if self['query'] is None:
self['query'] = pre_message['query']
else:
self['query'] = pre_message['query']
return self
def _if_possible_send(self, message):
logi = True
logi = logi and (message['from'] == 'bot')
logi = logi and (message['message'] != '')
return logi
def _filter_message_2_post(self):
posts, last_post = [], []
if self['collection']:
messages = [m for m in self['message']
if self._if_possible_send(m)]
if len(messages):
last_post = [messages[-1]]
posts = messages[:-1]
else:
if self._if_possible_send(self):
last_post = [copy.copy(self)]
return posts, last_post
def _detect_message_sending_status(self):
if 'sending_status' in self:
return self['sending_status']
return True
def _preformat_collection_messages(self):
if not self._detect_message_sending_status():
if not self['collection']:
self['message'] = [copy.copy(self)]
self['collection'] = True
return self
return self
def _is_prepared(self, message):
if message['message'] == '':
return False
if 'sending_status' in self:
return self['sending_status']
if 'posting_status' in self:
return self['posting_status']
def is_prepared(self):
if self['collection']:
return any([self._is_prepared(e) for e in self['message']])
else:
return self._is_prepared(self)
return False
def add_tags(self, tags):
if tags is not None and (type(tags) in [list, str]):
tags = tags if type(tags) == list else [tags]
if 'tags' in self:
old_tags = self['tags']
old_tags += tags
old_tags = list(set(old_tags))
self['tags'] = old_tags
else:
self['tags'] = tags
if self['collection']:
if 'tags' in self['message'][-1]:
old_tags = self['message'][-1]['tags']
old_tags += tags
old_tags = list(set(old_tags))
self['message'][-1]['tags'] = old_tags
self['tags'] = old_tags
else:
self['message'][-1]['tags'] = tags
return self
def collapse_message(self, message):
self._preformat_collection_messages()
if self['collection']:
messagestext = copy.copy(self['message'])
if message['collection']:
messagestext += message['message']
else:
messagestext.append(message)
self.update(message)
self['message'] = messagestext
self['collection'] = True
self.check_message()
return self
else:
output_message = copy.copy(message)
output_message['collection'] = False
if 'query' in message:
output_message['query'] = message['query']
output_message =\
ChatbotMessage.from_candidates_messages(output_message)
output_message.check_message()
return output_message
def add_selector_types(self, selector_types):
## Store results in message
self['selector_types'] = selector_types
return self
def add_entry_to_last_message(self, entry_var, var):
self[entry_var] = var
if self['collection']:
self['message'][-1][entry_var] = var
return self
def structure_answer(self):
## Input selector types
if self['collection']:
self['message'][-1]['selector_types'] = self['selector_types']
self.check_message()
return self
def check_message(self):
if self['collection']:
assert(all([isinstance(m, dict) for m in self['message']]))
assert(all([isinstance(m['message'], str)
for m in self['message']]))
else:
assert(isinstance(self['message'], str))
| [
"[email protected]"
] | |
d1193d44c5e75ff605a3a7007ffa13f5294f8fb5 | e6120961ab5a2005e86cf772e56d694878b8cb35 | /Finance/Old_Finance/MyFinance8.py | c3c87e529104e1875500a06c8783c14c4d4e7ac1 | [] | no_license | marcelo-alves87/STN-PythonLearning | a81e1d379dc28fd0334883dc7f930c7aadc6f047 | c2d31c0ae55a302d8cd35636ed02673452536f8e | refs/heads/master | 2023-08-22T09:05:56.313556 | 2023-08-20T21:33:16 | 2023-08-20T21:33:16 | 139,202,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | import numpy as np
import pandas as pd
import pandas_datareader.data as web
from collections import Counter
from sklearn import svm, cross_validation, neighbors
from sklearn.ensemble import VotingClassifier, RandomForestClassifier
def process_data_for_labels(ticker):
hm_days = 7
df = pd.read_csv('ibovespa_joined_closes.csv', index_col=0)
tickers = df.columns.values.tolist()
df.fillna(0, inplace=True)
for i in range(1, hm_days+1):
df['{}_{}d'.format(ticker, i)] = (df[ticker].shift(-i) - df[ticker]) / df[ticker]
df.fillna(0, inplace=True)
return tickers, df
def buy_sell_hold(*args):
cols = [c for c in args]
requirement = 0.02
for col in cols:
if col > requirement:
return 1
if col < -requirement:
return -1
return 0
def extract_featuresets(ticker):
tickers, df = process_data_for_labels(ticker)
df['{}_target'.format(ticker)] = list(map( buy_sell_hold,
df['{}_1d'.format(ticker)],
df['{}_2d'.format(ticker)],
df['{}_3d'.format(ticker)],
df['{}_4d'.format(ticker)],
df['{}_5d'.format(ticker)],
df['{}_6d'.format(ticker)],
df['{}_7d'.format(ticker)]))
vals = df['{}_target'.format(ticker)].values.tolist()
str_vals = [str(i) for i in vals]
print('Data spread:', Counter(str_vals))
df.fillna(0, inplace=True)
df = df.replace([np.inf, -np.inf], np.nan)
df.dropna(inplace=True)
df_vals = df[[ticker for ticker in tickers]].pct_change()
df_vals = df_vals.replace([np.inf, -np.inf], 0)
df_vals.fillna(0, inplace=True)
X = df_vals.values
y = df['{}_target'.format(ticker)].values
return X, y, df
def do_ml(ticker):
X, y, df = extract_featuresets(ticker)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.25)
clf = VotingClassifier([('lsvc', svm.LinearSVC()),
('knn', neighbors.KNeighborsClassifier()),
('rfor', RandomForestClassifier())])
clf.fit(X_train, y_train)
confidence = clf.score(X_test, y_test)
print('accuracy:', confidence)
predictions = clf.predict(X_test)
print('predicted class counts:', Counter(predictions))
return confidence
do_ml('ABEV3')
| [
"[email protected]"
] | |
0f0a43f2a910cb3bd27dccab958083608f47a592 | 0258e0c9595406ceb3de32067aff776bc2a58fa8 | /06_p12.py | a649f413d98bebdcef131856db0da2a3d6949b5d | [] | no_license | akromibn37/python_code | 72c016c361b3ba2e04c83e1d1a703171b0bd8819 | 41d1a09f8ec8696e37ad83c1a0cb6506c7f0f4f6 | refs/heads/master | 2020-03-21T22:57:25.111642 | 2018-06-29T14:14:33 | 2018-06-29T14:14:33 | 139,157,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | data = input().strip()
l = []
for x in range(len(data)):
l.append(data[x])
num = int(input().strip())
out = ""
i = 0
while i<num:
out = ""
command = [e for e in input().split()]
if command[0] == "in":
l.insert(int(command[2]),command[1])
elif command[0] == "out":
l.pop(int(command[1]))
elif command[0] == "swap":
x = l[int(command[1])]
y = l[int(command[2])]
l[int(command[1])] = y
l[int(command[2])] = x
for j in range(len(l)):
out += l[j]
print(out)
i+=1
| [
"[email protected]"
] | |
261b9e2fe87ce74a8028d94c3c61852211f01d39 | 1d482878230a6c6cbef7680f3910561a4b35c35c | /element/scripts/migrations/0021_auto_20180218_0632.py | dacc71e2958595a88be73d43e5bd6e43cab8ed4d | [] | no_license | karthikvasudevan92/elem | ac5355fe029251b7de76428a558049ab949689df | f5dad5cdfaba736843d29c781ec253d2cee51ccd | refs/heads/master | 2021-04-28T03:47:23.643252 | 2018-03-17T13:10:07 | 2018-03-17T13:10:07 | 122,144,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # Generated by Django 2.0.1 on 2018-02-18 06:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scripts', '0020_auto_20180218_0621'),
]
operations = [
migrations.AlterField(
model_name='script_line',
name='script',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scripts.Script'),
),
]
| [
"[email protected]"
] | |
e8a033aa3f8f48fd75b3f9aca077478771f2bb75 | 01f09bdec77ee7823919824ac25cb1a3610790cb | /d2_Assignment_07_a.py | 8a77cb92183fcedb51f410582e51d30fbffb372e | [] | no_license | SYN2002/PYTHON-LAB | 3da5bda642d7a139ccf39e350750da8d4f5128d0 | dd93fa884415f423988375f2d3b0f058bc253135 | refs/heads/main | 2023-08-27T11:53:53.075132 | 2021-10-17T15:31:00 | 2021-10-17T15:31:00 | 406,604,738 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | n1=int(input("Enter the lower limit: "))
n2=int(input("Enter the upper limit: "))
i,j,c=1,0,0
print("The prime numbers are: ")
for i in range(n1,n2+1):
c=0
for j in range(1,i+1):
if(i%j==0):
c=c+1
if(c==2):
print(i,end=" ") | [
"[email protected]"
] | |
46e316c0cc99e56127b71e1ee886f2f946be6258 | 1b6da6feaeeaa3801279781ab8421e7294c5b393 | /python27/py_auto_test/test_cases/utility_mysql.py | f2253b0fd0e97f23add55b6d6364cf3cd8efa204 | [] | no_license | doorhinges0/my_projects | 703bbc92425e6c0604088d546b84be6dca37c0cd | f981ca0bfd79c3a119cd52155028f3f338378690 | refs/heads/master | 2021-01-13T12:00:06.992906 | 2015-12-28T12:00:42 | 2015-12-28T12:00:42 | 48,828,883 | 0 | 1 | null | 2015-12-31T02:24:37 | 2015-12-31T02:24:37 | null | UTF-8 | Python | false | false | 1,354 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import mysql.connector
# Note: set password to your own MySQL root password:
conn = mysql.connector.connect(user='root', password='trxhfly', database='account_system_db', use_unicode=True)
def get_user(type, id):
if type not in ('uid','acc','phone','email'):
return None
if not id:
return None
user=None
values=None
uid=None
query=None
if type=='uid':
query='select * from user_info where uid=%s' % id
elif type=='acc':
query='select * from user_info where acc="%s"' % id
elif type=='phone':
query='select * from user_info where phone="%s"' % id
elif type=='email':
query='select * from user_info where email="%s"' % id
if query:
print('query=',query)
#values=redis_obj.hmget(uid,user_fields)
cursor = conn.cursor(dictionary=True)
cursor.execute(query)
values = cursor.fetchall()
        # Close the cursor (the module-level connection stays open):
        cursor.close()
if 1==len(values):
user=values[0]
return user
if '__main__'==__name__:
'''
user=get_user('uid','100000')
print(user)
print(user['uid'])
print(user['acc'])
'''
user=get_user('acc','svqymidfc6m9')
if user:
print(user)
print(user['uid'])
print(user['acc'])
| [
"[email protected]"
] | |
d4db6548602d798752d08f491cc5d988c7ab352a | ce46bd76dac66e5ff6cfa6556c9d549af324f48e | /ml-progress-bot/download_media.py | 41ffb527fb8f60c7b463737282c8784264a7a0f9 | [
"MIT"
] | permissive | yell/kaggle-camera | f21b56277c278395496dc78bafbdb41de60439de | 7b471c3631343f6f7fd7adf1a80b2edb46d62f0b | refs/heads/master | 2023-08-26T04:49:11.158767 | 2021-11-05T14:54:58 | 2021-11-05T14:54:58 | 117,623,865 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | from telethon.sync import TelegramClient
api_id = 'TODO'
api_hash = 'TODO'
client = TelegramClient('test_session', api_id, api_hash)
client.start()
print(dir(client))
for message in client.get_messages('ml_progress_bot', limit=10000):
client.download_media(message)
| [
"[email protected]"
] | |
67cd9c4d1cbc6de5a7be578e14c812fc18dd3f18 | 2edbe77571e522722a759921cd45cf6ff540e87d | /quene/marketmodel.py | da9893a00121afcdaeb56f737460e67203feb162 | [] | no_license | nasty11pig/pyStudy | 5253da4334c3e51bff938d1343a85ff76cd1c0d6 | da33c6c59bc84689c4aae8771be4ad36671ab5bf | refs/heads/master | 2020-03-17T01:08:35.501268 | 2018-05-26T15:51:38 | 2018-05-26T15:51:38 | 133,141,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | # -*- coding: utf-8 -*-
class MarketModel():
def __init__(self, lengthofSimulation, averageTimePerCus,
probabilityofNewArrival):
self._probabilityofNewArrival = probabilityofNewArrival
        self._averageTimePerCus = averageTimePerCus
        self._lengthofSimulation = lengthofSimulation
self._cashier = Cashier()
def runSimulation(self):
for currentTime in range(self._lengthofSimulation):
customer = Customer.generateCustomer(
self._probabilityofNewArrival,
currentTime,
                self._averageTimePerCus)
if customer != None:
self._cashier.addCustomer(customer)
self._cashier.serveCustomers(currentTime)
def __str__(self):
return str(self._cashier)
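# --- Minimal stand-ins (assumptions, not part of the original module) for
# the Customer and Cashier classes referenced above; they implement only
# the methods MarketModel actually calls.
import random

class Customer(object):
    @staticmethod
    def generateCustomer(probabilityofNewArrival, currentTime, averageTimePerCus):
        # Return a new customer with the given arrival probability, else None.
        if random.random() < probabilityofNewArrival:
            return Customer()
        return None

class Cashier(object):
    def __init__(self):
        self._queue = []

    def addCustomer(self, customer):
        self._queue.append(customer)

    def serveCustomers(self, currentTime):
        # Serve (dequeue) at most one waiting customer per time step.
        if self._queue:
            self._queue.pop(0)

    def __str__(self):
        return "%d customers left in line" % len(self._queue)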
| [
"[email protected]"
] | |
2d85e566ab46559127ff094934cff6b9e3b4a756 | e72db255e41332c113f929eb63815b2169038209 | /Chapter08/audio-encode-server-4/audio_encode_server/s3.py | 8585e1faf5d52e430754cde9e22635bf0eee6396 | [
"MIT"
] | permissive | PacktPublishing/Hands-On-Reactive-Programming-with-Python | b196b971fe49a36da9f979790b8c31c98a659031 | 757d45e2023032c6074e26ad252530f3c89978bf | refs/heads/master | 2023-02-07T01:03:37.648175 | 2023-02-05T18:21:17 | 2023-02-05T18:21:38 | 128,761,473 | 75 | 19 | null | null | null | null | UTF-8 | Python | false | false | 2,077 | py | import asyncio
from collections import namedtuple
from io import BytesIO
import reactivex as rx
import boto3
from boto3.session import Session
from cyclotron import Component
Source = namedtuple('Source', ['response'])
Sink = namedtuple('Sink', ['request'])
# Sink objects
Configure = namedtuple('Configure', [
'access_key', 'secret_key',
'bucket', 'endpoint_url', 'region_name'])
UploadObject = namedtuple('UploadObject', ['key', 'data', 'id'])
# Source objects
UploadReponse = namedtuple('UploadReponse', ['key', 'id'])
def make_driver(loop=None):
if loop is None:
loop = asyncio.get_event_loop()
def driver(sink):
def on_subscribe(observer, scheduler):
client = None
bucket = None
def on_next(item):
nonlocal client
nonlocal bucket
if type(item) is Configure:
session = Session(aws_access_key_id=item.access_key,
aws_secret_access_key=item.secret_key)
client = session.client(
's3',
endpoint_url=item.endpoint_url,
region_name=item.region_name)
bucket = item.bucket
elif type(item) is UploadObject:
data = BytesIO(item.data)
client.upload_fileobj(data, bucket, item.key)
loop.call_soon_threadsafe(observer.on_next, UploadReponse(
key=item.key,
id=item.id))
else:
loop.call_soon_threadsafe(observer.on_error, "unknown item: {}".format(type(item)))
sink.request.subscribe(
on_next=on_next,
on_error=lambda e: loop.call_soon_threadsafe(observer.on_error, e),
on_completed=lambda: loop.call_soon_threadsafe(observer.on_completed))
return Source(
response=rx.create(on_subscribe)
)
return Component(call=driver, input=Sink)
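# --- Wiring sketch (illustration only; not part of the original module).
# Placeholders throughout: the credentials, bucket, endpoint and object
# data are invented, and it assumes the driver function is reachable
# through the Component's `call` attribute as constructed above.
if __name__ == "__main__":
    from reactivex.subject import Subject

    requests = Subject()
    source = make_driver().call(Sink(request=requests))
    source.response.subscribe(on_next=print, on_error=print)
    requests.on_next(Configure(
        access_key="ACCESS", secret_key="SECRET", bucket="audio",
        endpoint_url="https://s3.example.com", region_name="us-east-1"))
    requests.on_next(UploadObject(key="encoded/track.flac", data=b"\x00", id="track-1"))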
| [
"[email protected]"
] | |
de47d3adc3f532d09cc14eab66da496c3a9dfa6a | 5f82cd4c97e3bc950799f3d2feedd8e5f800dc4c | /FNN.py | 7e0a73075675825258d17f22f9d8062db01426e9 | [] | no_license | Zhetuo-Zhao/deepLearning_template | 77f0b79d229999f009de61fe43c5d80a85ce7743 | c477a4eccb24cd833e2cbdd9840923f5d3f6ebb1 | refs/heads/master | 2022-12-19T05:19:28.724019 | 2020-09-16T06:11:17 | 2020-09-16T06:11:17 | 295,637,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,401 | py | # %%
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets("/tmp/data/", one_hot=True)
n_nodes_hl1=500
n_nodes_hl2=500
n_nodes_hl3=500
n_classes=10
batch_size=100
x=tf.placeholder('float',[None, 784])
y=tf.placeholder('float')
def neural_network_model(data):
hidden_1_layer={'weights':tf.Variable(tf.random_normal([784,n_nodes_hl1])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
'biases': tf.Variable(tf.random_normal([n_classes]))}
l1=tf.add(tf.matmul(data,hidden_1_layer['weights']), hidden_1_layer['biases'])
l1=tf.nn.relu(l1)
l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
l2 = tf.nn.relu(l2)
l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
l3 = tf.nn.relu(l3)
output = tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])
return output
def train_neural_network(x,y):
prediction = neural_network_model(x)
cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
optimizer=tf.train.AdamOptimizer().minimize(cost)
hm_epochs=10
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for epoch in range(hm_epochs):
epoch_loss=0
for _ in range(int(mnist.train.num_examples/batch_size)):
epoch_x, epoch_y = mnist.train.next_batch(batch_size)
_, c=sess.run([optimizer,cost],feed_dict={x:epoch_x, y:epoch_y})
epoch_loss += c
print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
correct= tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
accuracy =tf.reduce_mean(tf.cast(correct,'float'))
print('Accuracy:', accuracy.eval({x:mnist.test.images, y:mnist.test.labels}))
train_neural_network(x,y) | [
"[email protected]"
] | |
a70ed864f8709eca7cb6f56bd0f9445ad1b82d1b | b828fc06f40d1754dc5d6ab87b7360b97dff2938 | /intrinio_sdk/models/zacks_long_term_growth_rate.py | 38b6c21e356c2fd74ea5df4aca5ee4409d3c9166 | [] | no_license | dhruvsagar/python-sdk | 90302f3727022b9bc2dea83c7df2268bac180281 | 792f8b47a5d3238a92f62b40d164639850d9c4cb | refs/heads/master | 2022-06-04T20:38:51.263726 | 2020-05-05T20:24:29 | 2020-05-05T20:24:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57,035 | py | # coding: utf-8
"""
Intrinio API
Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection of financial data feed APIs sourced by our own proprietary processes as well as from many data vendors. For a complete API request / response reference please view the [Intrinio API documentation](https://intrinio.com/documentation/api_v2). If you need additional help in using the API, please visit the [Intrinio website](https://intrinio.com) and click on the chat icon in the lower right corner. # noqa: E501
OpenAPI spec version: 2.13.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from intrinio_sdk.models.security_summary import SecuritySummary # noqa: F401,E501
class ZacksLongTermGrowthRate(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'mean': 'float',
'median': 'float',
'count': 'int',
'high': 'float',
'low': 'float',
'std_dev': 'str',
'revisions_upward': 'int',
'revisions_downward': 'int',
'mean_7_days_ago': 'str',
'mean_30_days_ago': 'str',
'mean_60_days_ago': 'str',
'mean_90_days_ago': 'str',
'revisions_upward_last_7_days': 'str',
'revisions_downward_last_7_days': 'str',
'revisions_upward_last_30_days': 'str',
'revisions_downward_last_30_days': 'str',
'revisions_upward_last_60_days': 'str',
'revisions_downward_last_60_days': 'str',
'revisions_upward_last_90_days': 'str',
'revisions_downward_last_90_days': 'str',
'revisions_upward_last_120_days': 'str',
'revisions_downward_last_120_days': 'str',
'revisions_upward_last_150_days': 'str',
'revisions_downward_last_150_days': 'str',
'security': 'SecuritySummary'
}
attribute_map = {
'mean': 'mean',
'median': 'median',
'count': 'count',
'high': 'high',
'low': 'low',
'std_dev': 'std_dev',
'revisions_upward': 'revisions_upward',
'revisions_downward': 'revisions_downward',
'mean_7_days_ago': 'mean_7_days_ago',
'mean_30_days_ago': 'mean_30_days_ago',
'mean_60_days_ago': 'mean_60_days_ago',
'mean_90_days_ago': 'mean_90_days_ago',
'revisions_upward_last_7_days': 'revisions_upward_last_7_days',
'revisions_downward_last_7_days': 'revisions_downward_last_7_days',
'revisions_upward_last_30_days': 'revisions_upward_last_30_days',
'revisions_downward_last_30_days': 'revisions_downward_last_30_days',
'revisions_upward_last_60_days': 'revisions_upward_last_60_days',
'revisions_downward_last_60_days': 'revisions_downward_last_60_days',
'revisions_upward_last_90_days': 'revisions_upward_last_90_days',
'revisions_downward_last_90_days': 'revisions_downward_last_90_days',
'revisions_upward_last_120_days': 'revisions_upward_last_120_days',
'revisions_downward_last_120_days': 'revisions_downward_last_120_days',
'revisions_upward_last_150_days': 'revisions_upward_last_150_days',
'revisions_downward_last_150_days': 'revisions_downward_last_150_days',
'security': 'security'
}
def __init__(self, mean=None, median=None, count=None, high=None, low=None, std_dev=None, revisions_upward=None, revisions_downward=None, mean_7_days_ago=None, mean_30_days_ago=None, mean_60_days_ago=None, mean_90_days_ago=None, revisions_upward_last_7_days=None, revisions_downward_last_7_days=None, revisions_upward_last_30_days=None, revisions_downward_last_30_days=None, revisions_upward_last_60_days=None, revisions_downward_last_60_days=None, revisions_upward_last_90_days=None, revisions_downward_last_90_days=None, revisions_upward_last_120_days=None, revisions_downward_last_120_days=None, revisions_upward_last_150_days=None, revisions_downward_last_150_days=None, security=None): # noqa: E501
"""ZacksLongTermGrowthRate - a model defined in Swagger""" # noqa: E501
self._mean = None
self._median = None
self._count = None
self._high = None
self._low = None
self._std_dev = None
self._revisions_upward = None
self._revisions_downward = None
self._mean_7_days_ago = None
self._mean_30_days_ago = None
self._mean_60_days_ago = None
self._mean_90_days_ago = None
self._revisions_upward_last_7_days = None
self._revisions_downward_last_7_days = None
self._revisions_upward_last_30_days = None
self._revisions_downward_last_30_days = None
self._revisions_upward_last_60_days = None
self._revisions_downward_last_60_days = None
self._revisions_upward_last_90_days = None
self._revisions_downward_last_90_days = None
self._revisions_upward_last_120_days = None
self._revisions_downward_last_120_days = None
self._revisions_upward_last_150_days = None
self._revisions_downward_last_150_days = None
self._security = None
self.discriminator = None
if mean is not None:
self.mean = mean
if median is not None:
self.median = median
if count is not None:
self.count = count
if high is not None:
self.high = high
if low is not None:
self.low = low
if std_dev is not None:
self.std_dev = std_dev
if revisions_upward is not None:
self.revisions_upward = revisions_upward
if revisions_downward is not None:
self.revisions_downward = revisions_downward
if mean_7_days_ago is not None:
self.mean_7_days_ago = mean_7_days_ago
if mean_30_days_ago is not None:
self.mean_30_days_ago = mean_30_days_ago
if mean_60_days_ago is not None:
self.mean_60_days_ago = mean_60_days_ago
if mean_90_days_ago is not None:
self.mean_90_days_ago = mean_90_days_ago
if revisions_upward_last_7_days is not None:
self.revisions_upward_last_7_days = revisions_upward_last_7_days
if revisions_downward_last_7_days is not None:
self.revisions_downward_last_7_days = revisions_downward_last_7_days
if revisions_upward_last_30_days is not None:
self.revisions_upward_last_30_days = revisions_upward_last_30_days
if revisions_downward_last_30_days is not None:
self.revisions_downward_last_30_days = revisions_downward_last_30_days
if revisions_upward_last_60_days is not None:
self.revisions_upward_last_60_days = revisions_upward_last_60_days
if revisions_downward_last_60_days is not None:
self.revisions_downward_last_60_days = revisions_downward_last_60_days
if revisions_upward_last_90_days is not None:
self.revisions_upward_last_90_days = revisions_upward_last_90_days
if revisions_downward_last_90_days is not None:
self.revisions_downward_last_90_days = revisions_downward_last_90_days
if revisions_upward_last_120_days is not None:
self.revisions_upward_last_120_days = revisions_upward_last_120_days
if revisions_downward_last_120_days is not None:
self.revisions_downward_last_120_days = revisions_downward_last_120_days
if revisions_upward_last_150_days is not None:
self.revisions_upward_last_150_days = revisions_upward_last_150_days
if revisions_downward_last_150_days is not None:
self.revisions_downward_last_150_days = revisions_downward_last_150_days
if security is not None:
self.security = security
@property
def mean(self):
"""Gets the mean of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate # noqa: E501
:return: The mean of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
return self._mean
@property
def mean_dict(self):
"""Gets the mean of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The mean of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
result = None
value = self.mean
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'mean': value }
return result
@mean.setter
def mean(self, mean):
"""Sets the mean of this ZacksLongTermGrowthRate.
The mean long term growth estimate # noqa: E501
:param mean: The mean of this ZacksLongTermGrowthRate. # noqa: E501
:type: float
"""
self._mean = mean
@property
def median(self):
"""Gets the median of this ZacksLongTermGrowthRate. # noqa: E501
The median long term growth estimate # noqa: E501
:return: The median of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
return self._median
@property
def median_dict(self):
"""Gets the median of this ZacksLongTermGrowthRate. # noqa: E501
The median long term growth estimate as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The median of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
result = None
value = self.median
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'median': value }
return result
@median.setter
def median(self, median):
"""Sets the median of this ZacksLongTermGrowthRate.
The median long term growth estimate # noqa: E501
:param median: The median of this ZacksLongTermGrowthRate. # noqa: E501
:type: float
"""
self._median = median
@property
def count(self):
"""Gets the count of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimates # noqa: E501
:return: The count of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: int
"""
return self._count
@property
def count_dict(self):
"""Gets the count of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimates as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The count of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: int
"""
result = None
value = self.count
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'count': value }
return result
@count.setter
def count(self, count):
"""Sets the count of this ZacksLongTermGrowthRate.
The number of long term growth estimates # noqa: E501
:param count: The count of this ZacksLongTermGrowthRate. # noqa: E501
:type: int
"""
self._count = count
@property
def high(self):
"""Gets the high of this ZacksLongTermGrowthRate. # noqa: E501
The high long term growth estimate # noqa: E501
:return: The high of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
return self._high
@property
def high_dict(self):
"""Gets the high of this ZacksLongTermGrowthRate. # noqa: E501
The high long term growth estimate as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The high of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
result = None
value = self.high
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'high': value }
return result
@high.setter
def high(self, high):
"""Sets the high of this ZacksLongTermGrowthRate.
The high long term growth estimate # noqa: E501
:param high: The high of this ZacksLongTermGrowthRate. # noqa: E501
:type: float
"""
self._high = high
@property
def low(self):
"""Gets the low of this ZacksLongTermGrowthRate. # noqa: E501
The low long term growth estimate # noqa: E501
:return: The low of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
return self._low
@property
def low_dict(self):
"""Gets the low of this ZacksLongTermGrowthRate. # noqa: E501
The low long term growth estimate as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The low of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: float
"""
result = None
value = self.low
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'low': value }
return result
@low.setter
def low(self, low):
"""Sets the low of this ZacksLongTermGrowthRate.
The low long term growth estimate # noqa: E501
:param low: The low of this ZacksLongTermGrowthRate. # noqa: E501
:type: float
"""
self._low = low
@property
def std_dev(self):
"""Gets the std_dev of this ZacksLongTermGrowthRate. # noqa: E501
The standard deviation long term growth estimate # noqa: E501
:return: The std_dev of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._std_dev
@property
def std_dev_dict(self):
"""Gets the std_dev of this ZacksLongTermGrowthRate. # noqa: E501
The standard deviation long term growth estimate as a dictionary. Useful for Panda Dataframes. # noqa: E501
:return: The std_dev of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.std_dev
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'std_dev': value }
return result
@std_dev.setter
def std_dev(self, std_dev):
"""Sets the std_dev of this ZacksLongTermGrowthRate.
The standard deviation long term growth estimate # noqa: E501
:param std_dev: The std_dev of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._std_dev = std_dev
@property
def revisions_upward(self):
"""Gets the revisions_upward of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward # noqa: E501
:return: The revisions_upward of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: int
"""
return self._revisions_upward
@property
def revisions_upward_dict(self):
"""Gets the revisions_upward of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions upward as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_upward of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: int
"""
result = None
value = self.revisions_upward
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward': value }
return result
@revisions_upward.setter
def revisions_upward(self, revisions_upward):
"""Sets the revisions_upward of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward # noqa: E501
:param revisions_upward: The revisions_upward of this ZacksLongTermGrowthRate. # noqa: E501
:type: int
"""
self._revisions_upward = revisions_upward
@property
def revisions_downward(self):
"""Gets the revisions_downward of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward # noqa: E501
:return: The revisions_downward of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: int
"""
return self._revisions_downward
@property
def revisions_downward_dict(self):
"""Gets the revisions_downward of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions downward as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_downward of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: int
"""
result = None
value = self.revisions_downward
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward': value }
return result
@revisions_downward.setter
def revisions_downward(self, revisions_downward):
"""Sets the revisions_downward of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward # noqa: E501
:param revisions_downward: The revisions_downward of this ZacksLongTermGrowthRate. # noqa: E501
:type: int
"""
self._revisions_downward = revisions_downward
@property
def mean_7_days_ago(self):
"""Gets the mean_7_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate 7 days ago # noqa: E501
:return: The mean_7_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._mean_7_days_ago
@property
def mean_7_days_ago_dict(self):
"""Gets the mean_7_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
        The mean long term growth estimate 7 days ago as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The mean_7_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.mean_7_days_ago
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'mean_7_days_ago': value }
return result
@mean_7_days_ago.setter
def mean_7_days_ago(self, mean_7_days_ago):
"""Sets the mean_7_days_ago of this ZacksLongTermGrowthRate.
The mean long term growth estimate 7 days ago # noqa: E501
:param mean_7_days_ago: The mean_7_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._mean_7_days_ago = mean_7_days_ago
@property
def mean_30_days_ago(self):
"""Gets the mean_30_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate 30 days ago # noqa: E501
:return: The mean_30_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._mean_30_days_ago
@property
def mean_30_days_ago_dict(self):
"""Gets the mean_30_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
        The mean long term growth estimate 30 days ago as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The mean_30_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.mean_30_days_ago
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'mean_30_days_ago': value }
return result
@mean_30_days_ago.setter
def mean_30_days_ago(self, mean_30_days_ago):
"""Sets the mean_30_days_ago of this ZacksLongTermGrowthRate.
The mean long term growth estimate 30 days ago # noqa: E501
:param mean_30_days_ago: The mean_30_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._mean_30_days_ago = mean_30_days_ago
@property
def mean_60_days_ago(self):
"""Gets the mean_60_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate 60 days ago # noqa: E501
:return: The mean_60_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._mean_60_days_ago
@property
def mean_60_days_ago_dict(self):
"""Gets the mean_60_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
        The mean long term growth estimate 60 days ago as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The mean_60_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.mean_60_days_ago
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'mean_60_days_ago': value }
return result
@mean_60_days_ago.setter
def mean_60_days_ago(self, mean_60_days_ago):
"""Sets the mean_60_days_ago of this ZacksLongTermGrowthRate.
The mean long term growth estimate 60 days ago # noqa: E501
:param mean_60_days_ago: The mean_60_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._mean_60_days_ago = mean_60_days_ago
@property
def mean_90_days_ago(self):
"""Gets the mean_90_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
The mean long term growth estimate 90 days ago # noqa: E501
:return: The mean_90_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._mean_90_days_ago
@property
def mean_90_days_ago_dict(self):
"""Gets the mean_90_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
        The mean long term growth estimate 90 days ago as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The mean_90_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.mean_90_days_ago
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'mean_90_days_ago': value }
return result
@mean_90_days_ago.setter
def mean_90_days_ago(self, mean_90_days_ago):
"""Sets the mean_90_days_ago of this ZacksLongTermGrowthRate.
The mean long term growth estimate 90 days ago # noqa: E501
:param mean_90_days_ago: The mean_90_days_ago of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._mean_90_days_ago = mean_90_days_ago
@property
def revisions_upward_last_7_days(self):
"""Gets the revisions_upward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 7 days # noqa: E501
:return: The revisions_upward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_upward_last_7_days
@property
def revisions_upward_last_7_days_dict(self):
"""Gets the revisions_upward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions upward in the last 7 days as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_upward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_upward_last_7_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward_last_7_days': value }
return result
@revisions_upward_last_7_days.setter
def revisions_upward_last_7_days(self, revisions_upward_last_7_days):
"""Sets the revisions_upward_last_7_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward in the last 7 days # noqa: E501
:param revisions_upward_last_7_days: The revisions_upward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_upward_last_7_days = revisions_upward_last_7_days
@property
def revisions_downward_last_7_days(self):
"""Gets the revisions_downward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 7 days # noqa: E501
:return: The revisions_downward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_downward_last_7_days
@property
def revisions_downward_last_7_days_dict(self):
"""Gets the revisions_downward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions downward in the last 7 days as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_downward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_downward_last_7_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward_last_7_days': value }
return result
@revisions_downward_last_7_days.setter
def revisions_downward_last_7_days(self, revisions_downward_last_7_days):
"""Sets the revisions_downward_last_7_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward in the last 7 days # noqa: E501
:param revisions_downward_last_7_days: The revisions_downward_last_7_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_downward_last_7_days = revisions_downward_last_7_days
@property
def revisions_upward_last_30_days(self):
"""Gets the revisions_upward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 30 days # noqa: E501
:return: The revisions_upward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_upward_last_30_days
@property
def revisions_upward_last_30_days_dict(self):
"""Gets the revisions_upward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions upward in the last 30 days as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_upward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_upward_last_30_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward_last_30_days': value }
return result
@revisions_upward_last_30_days.setter
def revisions_upward_last_30_days(self, revisions_upward_last_30_days):
"""Sets the revisions_upward_last_30_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward in the last 30 days # noqa: E501
:param revisions_upward_last_30_days: The revisions_upward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_upward_last_30_days = revisions_upward_last_30_days
@property
def revisions_downward_last_30_days(self):
"""Gets the revisions_downward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 30 days # noqa: E501
:return: The revisions_downward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_downward_last_30_days
@property
def revisions_downward_last_30_days_dict(self):
"""Gets the revisions_downward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions downward in the last 30 days as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_downward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_downward_last_30_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward_last_30_days': value }
return result
@revisions_downward_last_30_days.setter
def revisions_downward_last_30_days(self, revisions_downward_last_30_days):
"""Sets the revisions_downward_last_30_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward in the last 30 days # noqa: E501
:param revisions_downward_last_30_days: The revisions_downward_last_30_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_downward_last_30_days = revisions_downward_last_30_days
@property
def revisions_upward_last_60_days(self):
"""Gets the revisions_upward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 60 days # noqa: E501
:return: The revisions_upward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_upward_last_60_days
@property
def revisions_upward_last_60_days_dict(self):
"""Gets the revisions_upward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions upward in the last 60 days as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_upward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_upward_last_60_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward_last_60_days': value }
return result
@revisions_upward_last_60_days.setter
def revisions_upward_last_60_days(self, revisions_upward_last_60_days):
"""Sets the revisions_upward_last_60_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward in the last 60 days # noqa: E501
:param revisions_upward_last_60_days: The revisions_upward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_upward_last_60_days = revisions_upward_last_60_days
@property
def revisions_downward_last_60_days(self):
"""Gets the revisions_downward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 60 days # noqa: E501
:return: The revisions_downward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_downward_last_60_days
@property
def revisions_downward_last_60_days_dict(self):
"""Gets the revisions_downward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions downward in the last 60 days as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_downward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_downward_last_60_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward_last_60_days': value }
return result
@revisions_downward_last_60_days.setter
def revisions_downward_last_60_days(self, revisions_downward_last_60_days):
"""Sets the revisions_downward_last_60_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward in the last 60 days # noqa: E501
:param revisions_downward_last_60_days: The revisions_downward_last_60_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_downward_last_60_days = revisions_downward_last_60_days
@property
def revisions_upward_last_90_days(self):
"""Gets the revisions_upward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 90 days # noqa: E501
:return: The revisions_upward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_upward_last_90_days
@property
def revisions_upward_last_90_days_dict(self):
"""Gets the revisions_upward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions upward in the last 90 days as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_upward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_upward_last_90_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward_last_90_days': value }
return result
@revisions_upward_last_90_days.setter
def revisions_upward_last_90_days(self, revisions_upward_last_90_days):
"""Sets the revisions_upward_last_90_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward in the last 90 days # noqa: E501
:param revisions_upward_last_90_days: The revisions_upward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_upward_last_90_days = revisions_upward_last_90_days
@property
def revisions_downward_last_90_days(self):
"""Gets the revisions_downward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 90 days # noqa: E501
:return: The revisions_downward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_downward_last_90_days
@property
def revisions_downward_last_90_days_dict(self):
"""Gets the revisions_downward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions downward in the last 90 days as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_downward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_downward_last_90_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward_last_90_days': value }
return result
@revisions_downward_last_90_days.setter
def revisions_downward_last_90_days(self, revisions_downward_last_90_days):
"""Sets the revisions_downward_last_90_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward in the last 90 days # noqa: E501
:param revisions_downward_last_90_days: The revisions_downward_last_90_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_downward_last_90_days = revisions_downward_last_90_days
@property
def revisions_upward_last_120_days(self):
"""Gets the revisions_upward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 120 days # noqa: E501
:return: The revisions_upward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_upward_last_120_days
@property
def revisions_upward_last_120_days_dict(self):
"""Gets the revisions_upward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions upward in the last 120 days as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_upward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_upward_last_120_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward_last_120_days': value }
return result
@revisions_upward_last_120_days.setter
def revisions_upward_last_120_days(self, revisions_upward_last_120_days):
"""Sets the revisions_upward_last_120_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward in the last 120 days # noqa: E501
:param revisions_upward_last_120_days: The revisions_upward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_upward_last_120_days = revisions_upward_last_120_days
@property
def revisions_downward_last_120_days(self):
"""Gets the revisions_downward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 120 days # noqa: E501
:return: The revisions_downward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_downward_last_120_days
@property
def revisions_downward_last_120_days_dict(self):
"""Gets the revisions_downward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions downward in the last 120 days as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_downward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_downward_last_120_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward_last_120_days': value }
return result
@revisions_downward_last_120_days.setter
def revisions_downward_last_120_days(self, revisions_downward_last_120_days):
"""Sets the revisions_downward_last_120_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward in the last 120 days # noqa: E501
:param revisions_downward_last_120_days: The revisions_downward_last_120_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_downward_last_120_days = revisions_downward_last_120_days
@property
def revisions_upward_last_150_days(self):
"""Gets the revisions_upward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions upward in the last 150 days # noqa: E501
:return: The revisions_upward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_upward_last_150_days
@property
def revisions_upward_last_150_days_dict(self):
"""Gets the revisions_upward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions upward in the last 150 days as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_upward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_upward_last_150_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_upward_last_150_days': value }
return result
@revisions_upward_last_150_days.setter
def revisions_upward_last_150_days(self, revisions_upward_last_150_days):
"""Sets the revisions_upward_last_150_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions upward in the last 150 days # noqa: E501
:param revisions_upward_last_150_days: The revisions_upward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_upward_last_150_days = revisions_upward_last_150_days
@property
def revisions_downward_last_150_days(self):
"""Gets the revisions_downward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
The number of long term growth estimate revisions downward in the last 150 days # noqa: E501
:return: The revisions_downward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
return self._revisions_downward_last_150_days
@property
def revisions_downward_last_150_days_dict(self):
"""Gets the revisions_downward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
        The number of long term growth estimate revisions downward in the last 150 days as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The revisions_downward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: str
"""
result = None
value = self.revisions_downward_last_150_days
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'revisions_downward_last_150_days': value }
return result
@revisions_downward_last_150_days.setter
def revisions_downward_last_150_days(self, revisions_downward_last_150_days):
"""Sets the revisions_downward_last_150_days of this ZacksLongTermGrowthRate.
The number of long term growth estimate revisions downward in the last 150 days # noqa: E501
:param revisions_downward_last_150_days: The revisions_downward_last_150_days of this ZacksLongTermGrowthRate. # noqa: E501
:type: str
"""
self._revisions_downward_last_150_days = revisions_downward_last_150_days
@property
def security(self):
"""Gets the security of this ZacksLongTermGrowthRate. # noqa: E501
The Security of the Zacks long term growth estimate # noqa: E501
:return: The security of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: SecuritySummary
"""
return self._security
@property
def security_dict(self):
"""Gets the security of this ZacksLongTermGrowthRate. # noqa: E501
        The Security of the Zacks long term growth estimate as a dictionary. Useful for Pandas DataFrames. # noqa: E501
:return: The security of this ZacksLongTermGrowthRate. # noqa: E501
:rtype: SecuritySummary
"""
result = None
value = self.security
if isinstance(value, list):
result = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result = value.to_dict()
elif isinstance(value, dict):
result = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result = { 'security': value }
return result
@security.setter
def security(self, security):
"""Sets the security of this ZacksLongTermGrowthRate.
The Security of the Zacks long term growth estimate # noqa: E501
:param security: The security of this ZacksLongTermGrowthRate. # noqa: E501
:type: SecuritySummary
"""
self._security = security
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ZacksLongTermGrowthRate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
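# Minimal usage sketch (illustrative; assumes `rate` is a populated instance
# returned by the Intrinio SDK):
#
#   import pandas as pd
#   row = {}
#   row.update(rate.count_dict)  # e.g. {'count': 12}
#   row.update(rate.high_dict)   # e.g. {'high': 0.25}
#   df = pd.DataFrame([row])     # one flat record per security
#
# For plain values each *_dict property returns a one-key {attribute: value}
# mapping, so merging them builds a row suitable for a DataFrame.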
| [
"[email protected]"
] | |
f354b13480738cfc382c8ee67d5d250310c01f13 | eebeeb2c31dc90b21878196502efec9086b87c46 | /07/vmparser.py | b63709acbddb5f5c3126b6dab11a6926fae4627c | [] | no_license | festa78/nand2tetris | 6f07673d69277125331f7536f18214dce64d8008 | 815f887c584d3de91591abe44123ee5c3b006575 | refs/heads/master | 2023-03-06T00:46:01.412600 | 2021-02-07T12:35:33 | 2021-02-22T08:53:08 | 328,305,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,696 | py | class Parser:
COMMAND_TYPES = (
'C_ARITHMETIC',
'C_PUSH',
'C_POP',
'C_LABEL',
'C_GOTO',
'C_IF',
'C_FUNCTION',
'C_RETURN',
'C_CALL',
)
COMMAND_TO_TYPES = {
'and': COMMAND_TYPES[0],
'or': COMMAND_TYPES[0],
'not': COMMAND_TYPES[0],
'add': COMMAND_TYPES[0],
'sub': COMMAND_TYPES[0],
'neg': COMMAND_TYPES[0],
'eq': COMMAND_TYPES[0],
'lt': COMMAND_TYPES[0],
'gt': COMMAND_TYPES[0],
'push': COMMAND_TYPES[1],
'pop': COMMAND_TYPES[2],
}
def __init__(self, vmpath):
if not vmpath.endswith('.vm'):
raise ValueError('Not an .vm file but {}'.format(vmpath))
self.commands = []
with open(vmpath, 'r') as f:
for line in f.read().splitlines():
line = line.strip()
if not line:
continue
if line.startswith('//'):
continue
if '//' in line:
line = line.split('//')[0].strip()
self.commands.append(line)
self.index = 0
def hasMoreCommands(self):
return self.index + 1 < len(self.commands)
def advance(self):
assert self.hasMoreCommands()
self.index += 1
def commandType(self):
current_command = self.commands[self.index]
command_name = current_command.split(' ')[0]
        if command_name not in self.COMMAND_TO_TYPES:
raise AttributeError('Unsupported command')
return self.COMMAND_TO_TYPES[command_name]
def arg1(self):
command_type = self.commandType()
assert command_type != self.COMMAND_TYPES[7], 'Not for C_RETURN'
current_command = self.commands[self.index]
        if command_type == self.COMMAND_TYPES[0]:
# C_ARITHMETIC.
return current_command.split(' ')[0]
return current_command.split(' ')[1]
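    # Sketch of a typical driver loop (illustrative; assumes 'Foo.vm' exists):
    #
    #   parser = Parser('Foo.vm')
    #   while True:
    #       ctype = parser.commandType()
    #       if ctype != 'C_RETURN':
    #           first = parser.arg1()   # segment name, label, or the operator itself
    #       if ctype in ('C_PUSH', 'C_POP', 'C_FUNCTION', 'C_CALL'):
    #           second = parser.arg2()  # index or argument count
    #       if not parser.hasMoreCommands():
    #           break
    #       parser.advance()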
def arg2(self):
command_type = self.commandType()
current_command = self.commands[self.index]
        if command_type in (self.COMMAND_TYPES[1],   # C_PUSH
                            self.COMMAND_TYPES[2],   # C_POP
                            self.COMMAND_TYPES[6],   # C_FUNCTION
                            self.COMMAND_TYPES[8]):  # C_CALL
            return current_command.split(' ')[2]
        else:
            raise ValueError('Unsupported command type.') | [
"[email protected]"
] | |
c82b677441afb16074f0386638f5da0f86f9303e | 56a8d1f72b005bd52560c3804541be729876aa9f | /rotation.py | 2f05ebde4daf3525b7c39a173e8cbb402cf3dc59 | [] | no_license | drrobotk/pycodilitytests | e5e13c9dd683207290e598e577d73555c0ef29ed | acb5a8ad52135fa327fb97d7c42f95ae23cb3389 | refs/heads/master | 2021-04-14T03:16:33.397722 | 2020-03-22T15:23:57 | 2020-03-22T15:23:57 | 249,203,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A, K):
    # write your code in Python 3.6
    if len(A) != 0:
        for i in range(K):
            A.insert(0, A.pop())  # move the last element to the front
    return A  # also covers the empty-list case
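# Example: solution([3, 8, 9, 7, 6], 3) -> [9, 7, 6, 3, 8]
# (each pass moves the last element to the front, i.e. one right rotation)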
if __name__ == '__main__':
A = []
K = 3
result = solution(A, K)
print(result) | [
"[email protected]"
] | |
835fa05e3acbfa8beb25f0ef8b975295d4668c90 | 20002b0c41f0ff67553ea7ffb6568975792d8c95 | /main.py | 37a235d422efc3a4b23733caa5426aac6eee1393 | [] | no_license | hugos0910/Data_Science_London | be18667fd0121ba0b2549f02263fcad6d2a54448 | 74dcdd7d2504f0ba9be9b58acb01d2e305827b12 | refs/heads/master | 2021-01-18T06:00:53.529998 | 2016-09-21T17:17:48 | 2016-09-21T17:17:48 | 68,791,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py |
import numpy as np
import pandas as pd
import util
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
from sklearn.svm import SVC
# Import data
print('Importing data...')
train = pd.read_csv('train.csv', header = None)
test = pd.read_csv('test.csv', header = None)
label = pd.read_csv('trainLabels.csv', header = None)
label = np.ravel(label)
# Cleaning data
print('Sanitizing data...')
pca = PCA(n_components = 12, whiten = True)
train = pca.fit_transform(train)
test = pca.transform(test)
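# Note: PCA statistics are fit on the training set only; the same projection
# is then applied to the test set so no test information leaks into training.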
# # Obtain best parameters
# num_processor = -1
# util.obtain_parameters('RF', train, label, num_processor)
# util.obtain_parameters('ET', train, label, num_processor)
# util.obtain_parameters('SVM', train, label, num_processor)
# util.obtain_parameters('KNN', train, label, num_processor)
# util.obtain_parameters('LR', train, label, num_processor)
# Training classifier
'''
classifier abbreviations:
RF - Random Forest
ET - Extra Trees
SVM - Support Vector Machine
KNN - K Nearest Neighbors
LR - Logistic Regression
'''
classifier_name = 'SVM'
print('Training and prediction with %s classifier...' %classifier_name)
prediction = util.classify(classifier_name, train, label, test)
# Exporting solution
index = list(range(1,len(test) +1))
print('Writing data to CSV file...')
df_prediction = pd.DataFrame(data = prediction, index = index, columns = ['Solution'])
df_prediction_csv = df_prediction.to_csv('prediction_%s.csv' % classifier_name, index_label = ["Id"])
| [
"[email protected]"
] | |
a3832070b1ec7002d6f2dd0a9f5bd280d29a3962 | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/tensorflow/python/keras/layers/cudnn_recurrent 2.py | 96ae66c775e623fff4738688d4f11005c5261b33 | [] | no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:52c49577848819c4116b99c29c11e765e7a2d686e7ccb4dc7b84454bdf31510f
size 20854
| [
"[email protected]"
] | |
de220ce4ab36c212e05c19db10caeba92cbbe9e1 | 9f0d913822de7ebb86d9a2634e3b99d452db83ee | /py/folder2zip.py | 83bec69e42cb6e9818dbedca25d2eeee81ab084e | [] | no_license | esabox/code-desu | 3029e71d031a26c4da7687a394e4f497e09acabf | 63b3756035163cc3f480a0e87127eed93b090dfb | refs/heads/master | 2023-03-30T09:02:55.631388 | 2021-04-09T04:58:04 | 2021-04-09T04:58:04 | 222,210,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,994 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Zip each folder inside a folder, without compression
import os
import glob
import re
import sys
import shutil
gomibako = '/Users/kazoku/.Trash/'
def main():
print(sys.version)
print(sys.argv)
work_dir = '/Users/kazoku/Desktop/book/'
    # use the first command-line argument as the work dir, if given
if 1 < len(sys.argv):
work_dir = sys.argv[1]
os.chdir(work_dir)
print('pwd :'+os.getcwd())
    ls = glob.glob('*/')  # directories only
zipnisuru(ls)
res = input('\n[rename? Yes="" / No=other ]:')
if res == '':
        print('running rename')
zipnisuru(ls, True)
    print('done')
def zipnisuru(ls, write=False):
for f in ls:
# print(f)
nf = re.sub('/', '.zip', f)
print('new: '+nf)
cmd = 'zip -r --quiet -0 "' + nf + '" "' + f + '"'
        # -r (recurse) is required when zipping a folder, even a single one
if write:
os.system(cmd)
print(cmd)
            # os.remove(f)  # only works on a single file or an empty folder
            # shutil.rmtree(f)  # deletes outright, bypassing the Trash
            shutil.move(f, gomibako)  # gomibako points at the user's Trash
pass
# zip -r -n ".jpg:.JPG:.jpeg:.JPEG:.gif:.GIF" "$zipf" "$zipd" -x ".DS_Store"
# The -r option tells zip to descend recursively when an input file is a directory.
# The -n option tells zip not to compress files with the listed extensions; tiff and
# png could be added as well. It would be nicer if case differences could be ignored,
# which is why the list is a bit verbose. I keep this option because I still want text
# files and the like to be compressed.
# Instead of -n, the -0 option stores every file uncompressed. Options of the form -#
# (where # is a digit 0-9) set the relative compression speed, and -0 means no
# compression. If -0 better matches the goal of building uncompressed archives, it is
# the simpler choice.
# $zipf is a variable holding the name of the ZIP file to create.
# $zipd is a variable holding the name of the directory to archive. Both are wrapped
# in double quotes defensively: with spaces in the strings the shell would otherwise
# split them into separate arguments.
# Finally, the -x option names files to exclude from the ZIP. Finder may create
# .DS_Store as an invisible file, and it is not needed in the archive, so it is
# excluded here.
# Details of the zip command can also be looked up with man in a terminal.
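# A safer way to invoke zip, avoiding shell-quoting pitfalls entirely
# (sketch; relies on the same zip CLI being on PATH):
#
#   import subprocess
#   subprocess.run(['zip', '-r', '--quiet', '-0', nf, f], check=True)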
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e6417d749e8b17495ea9739ff8246df8c4a1a1eb | f5fe67aada0fca0ebc71d605e4a2dd3d60695a25 | /Evelyns Dateien/Grundpraktikum 2/US3/profil_streu_34.py | 2ee8df1f9cba6afa10dd8360dd49b71308ac5b84 | [] | no_license | rkallo/APWS1718 | 25eb10a88783434c81ebf1902936ceb6fc06ab0d | 5e124d5342ef2fd333311ddb4d323744323b68b8 | refs/heads/master | 2021-08-28T06:45:08.518688 | 2019-11-21T18:05:29 | 2019-11-21T18:05:29 | 223,233,392 | 2 | 1 | null | 2021-08-22T19:20:50 | 2019-11-21T17:59:30 | TeX | UTF-8 | Python | false | false | 479 | py | import numpy as np
import matplotlib.pyplot as plt
x, y = np.loadtxt('streu_34.txt', unpack=True,delimiter=',')
plt.figure(1)
plt.plot(x, y, 'rx', label='measured data')
plt.ylabel(r'$Intensity\,\,I$ $/ \, \% $')
plt.xlabel(r'$Penetration\,\,depth\,\,x$ $/ \, mm$')
plt.grid()
plt.legend()
plt.savefig('streu_34.pdf')
print('Done') | [
"[email protected]"
] | |
ab7427ee9ab9e98e843967f814b678f41e26f819 | a0406e59552037a3277d51eb9994565d3f557d7f | /book_lib/presentation/resource/book.py | c60de1f3f7e52df92f27bfa9d0b5430f5ae8b497 | [] | no_license | eezhal92/flask-sqlalchemy | 84bbf18fc041a2339842d56a7924a10f0332b975 | 7092651a5b5b73f1366c1c2473aab9b1c4eedea5 | refs/heads/master | 2021-07-12T10:10:54.572392 | 2017-10-17T11:47:39 | 2017-10-17T11:47:39 | 106,980,363 | 0 | 1 | null | 2017-10-17T11:47:40 | 2017-10-15T03:19:24 | Python | UTF-8 | Python | false | false | 454 | py | """Books Controller."""
from flask import jsonify
from flask_restful import Resource
from book_lib.infrastructure.repository.book import BookRepository
class Book(Resource):
"""."""
def __init__(self, **kwargs):
"""."""
self.db = kwargs['db']
self.book_repo = BookRepository(self.db)
def get(self):
"""."""
books = [b.serialize() for b in self.book_repo.find_all()]
return jsonify(books)
| [
"[email protected]"
] | |
2d5543f03917a6065eba621b789a7e27e75d8cf2 | 21c1da4a354037f8aed1fb323d64295f1d40d0c6 | /additive-number/solution.py | 3f0c8e98f8f5ab4e12f51b90f3a53185f8d49432 | [] | no_license | hsinhoyeh/leecode | b0de2334a1bcd9277335fba4ae7e3519775da8f9 | 7760d44f7a9038f48e12eabb6d5bafd182a0a8f6 | refs/heads/master | 2021-01-10T10:11:40.259319 | 2017-08-11T01:31:22 | 2017-08-11T01:31:22 | 45,776,150 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,931 | py | import math
class NextNum(object):
def __init__(self, num, start_index, max_len=None):
self.num = num
self.start_index = start_index
self.end_index = start_index
self.max_len = max_len
self.last_val = None
def next(self, nlen=1): # default, lookup the number with len=1
# none of value is leading with 0
# make sure that we won't go further
if self.last_val == 0:
return None
self.last_val = self._next(nlen)
return self.last_val
def _next(self, nlen=1):
self.end_index += nlen
if self.end_index > len(self.num):
return None
if self.start_index > len(self.num):
return None
if self.max_len:
if int(math.fabs(self.end_index - self.start_index)) > self.max_len:
return None
if self.num[self.start_index] == '0':
return 0
return int(self.num[self.start_index: self.end_index])
class Solution(object):
def findDigits(self, anum):
return int(math.log(anum, 10)) + 1
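    # e.g. findDigits(123) -> 3 (assumes anum > 0, since log10 is undefined at 0)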
# return the last index of found
# otherwise, return None
def findLast(self, num, lst_first_two_nums):
sum_of_2 = sum(lst_first_two_nums)
digits = self.findDigits(sum_of_2)
nn2 = NextNum(num, 0)
if nn2.next(digits) == sum_of_2:
return nn2.end_index
return None
def isAdditiveNumber(self, num):
"""
:type num: str
:rtype: bool
"""
# fix the first two numbers
# NOTE: the length of the first number and second shouldn't exceed n/2
        # since first + second = third.
half_num = int(len(num)/2)
nn0 = NextNum(num, 0, half_num)
val0 = nn0.next()
        while val0 is not None:
            # number 2 starts at the end of number 1
nn1 = NextNum(num, nn0.end_index, half_num)
val1 = nn1.next()
            while val1 is not None:
digits = self.findDigits(val0 + val1)
if len(num) < nn1.end_index + digits:
# no need to check
break
index = self.findLast(num[nn1.end_index:], [val0, val1])
if index:
index = index + nn1.end_index
tval0, tval1 = val0, val1
while index != len(num): # not end, we should keep looking
                        if index is None:
break
tval0, tval1 = tval1, tval0 + tval1
subindex = self.findLast(num[index:], [tval0, tval1])
if subindex:
index = index + subindex
else:
index = subindex
if index == len(num):
return True
val1 = nn1.next()
val0 = nn0.next()
return False
| [
"[email protected]"
] | |
58529c5f530f1fe4e00a6432565eb83f72ad4840 | f9c3c75718f5cfb3827658f8cedaa1fb8e289011 | /BujaMovies/migrations/0018_auto_20190903_2229.py | 77723265ca8edf07a4838817bed39093d340faac | [] | no_license | twizy/BullyHoodyGit | 7559c70269cd3b0abafb662a56fa33d95df9d6ff | 1a75617954428c70c82cf355f5aab0cb822ebc7a | refs/heads/master | 2020-07-12T21:25:29.609503 | 2019-09-03T21:50:57 | 2019-09-03T21:50:57 | 204,909,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | # Generated by Django 2.2.3 on 2019-09-03 20:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BujaMovies', '0017_auto_20190903_2118'),
]
operations = [
migrations.AddField(
model_name='films',
name='film',
field=models.FileField(blank=True, null=True, upload_to='Videos/'),
),
migrations.AlterField(
model_name='films',
name='cover',
field=models.ImageField(blank=True, null=True, upload_to='Covers/'),
),
]
| [
"[email protected]"
] | |
be7ba6113a8c980e9031c22cdb4c40498940dcc3 | c6bded1f43beb191c9c5fd7a96edc6f6bc366781 | /src/crawl_CF.py | d56d19a0593dac3dfdc11e4f72b330cb8d5b56d0 | [] | no_license | Renjerhady/EloR | 13d7a432504367755cda2200d08fd209fe7504a8 | 51edfaa260b491a7310815bbf961a3a099becbe7 | refs/heads/master | 2022-02-23T15:59:27.078638 | 2019-10-21T06:43:36 | 2019-10-21T06:44:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,830 | py | from bs4 import BeautifulSoup
import requests
import itertools
def get_soup(url):
headers = {
"User-Agent": "Pied Piper (www.arameb.com)",
"From": "Aram Ebtekar"
}
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.content, "html5lib")
return soup
def generate_hrefs(soup, prefix):
for href_tag in soup.find_all("a"):
href = href_tag["href"]
if href.startswith(prefix) and href.find("#") == -1:
href_suffix = href[len(prefix):]
if href_suffix.find("/") == -1:
yield href_suffix
def get_rated_contests(num_pages):
contests = []
for page in range(1, 1 + num_pages):
# Use ru because contests [541,648,649,780,904] were only made available in Russian
page_soup = get_soup(f"https://codeforces.com/contests/page/{page}?locale=ru")
for contest in generate_hrefs(page_soup, "/contest/"):
ratings_soup = get_soup(f"https://codeforces.com/contest/{contest}/ratings")
participants = ratings_soup.find_all(lambda tag: tag.name == "tr" and tag.has_attr("id"))
# Check that there is at least one *rated* participant
if len(participants) != 0:
contests.append(int(contest))
print(contest, flush=True)
list.reverse(contests)
print(f"The full list of {len(contests)} contests is {contests}", flush=True)
return contests
def participant_info(participant):
rank, handle = participant.find_all("td")[:2]
return handle.a.text, int(rank.text)
def save_contest_standings(contests, directory):
for contest in contests:
standings = []
tie_intervals = dict()
for page in itertools.count(1):
page_soup = get_soup(f"https://codeforces.com/contest/{contest}/ratings/page/{page}")
participants = page_soup.find_all(lambda tag: tag.name == "tr" and tag.has_attr("id"))
if page == 1:
title = page_soup.find(attrs={"class": "title"}).a.text.strip()
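            # Codeforces clamps an out-of-range page number to the last page, so
            # if page p opens with the same participant that started page p-1
            # (the index arithmetic assumes 100 rows per page), every page has
            # already been seen and the loop can stop.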
elif participant_info(participants[0]) == standings[100 * page - 200]:
break
for r, participant in enumerate(participants, len(standings) + 1):
handle, rank = participant_info(participant)
if len(standings) > 0 and standings[-1][1] == rank:
assert rank < r
else:
assert rank == r
standings.append((handle, rank))
tie_intervals[rank] = r
with open(f"{directory}/{contest}.txt", "w+") as standings_file:
standings_file.write(f"{len(standings)} {title}\n")
for handle, rank in standings:
standings_file.write(f"{handle} {rank} {tie_intervals[rank]}\n")
print(f"Standings saved to {contest}.txt")
def save_contests(contests, file):
with open(file, "w+") as contests_file:
contests_file.write(f"{len(contests)}\n")
for contest in contests:
contests_file.write(f"{contest}\n")
print(f"List of contests saved to {file}")
def get_contests(file):
    with open(file, 'r') as contests_file:
        return [int(contest) for contest in contests_file][1:]
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
    parser.add_argument('--pages', type=int, default=1,
                        help='Number of contest-list pages to scan for rated contests.')
args = parser.parse_args()
contests = get_rated_contests(args.pages)[-3:]
# contests = get_contests("../data/all_contests.txt")[-2:]
save_contest_standings(contests, "../standings")
| [
"[email protected]"
] | |
a39cbb706ac3420712b45eb050eae01efddba13e | 1e3f458b297b349eb875aebab254e05cdad2458e | /guessno.py | 6d6b1cee5d25c4ad5b2e5dd171bb21ffbf8c8694 | [] | no_license | mezeru/Python_Coding | 899169e162d01a2a1f6f043e45f3b07dc68e1001 | 99941431025b5c35731903dabb6c9e6106f59fcc | refs/heads/master | 2023-07-04T11:51:28.174018 | 2021-08-06T20:05:58 | 2021-08-06T20:05:58 | 255,226,334 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | import random
def rnum():
return (random.randint(0,10))
fnum=rnum()
cou = 0
while True:
print("Guesses the no :")
cou=cou+1
G=int(input())
if fnum == G :
print("You guessed right in " + str(cou)+" Guess")
break
elif fnum > G:
print("You guessed LOW")
continue
    else:  # fnum < G is the only remaining case
        print("You guessed HIGH")
        continue
| [
"[email protected]"
] | |
088293b7dea6a8f0f04a083b5246bc45e276a471 | c584fd0c13d2f396aaf940e9d4e774f01ce5e2ce | /apps/users/apps.py | 5838b89ccd4979ad020b44afbd2652bfef00306a | [] | no_license | wxrapha/RoadBicycle | ee07601458b5af72b1a853b2c4c969d6cdf81ae8 | 0f990267a47b738e97880c999ed3bc054c2889c7 | refs/heads/master | 2021-07-10T13:30:25.484884 | 2017-10-10T23:52:14 | 2017-10-10T23:52:20 | 103,798,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'apps.users'
    verbose_name = u'User Info'
| [
"[email protected]"
] | |
c261e60b78e10d19087b88625ff766401bcafd78 | 0bcd538401c0f7ffa61047c44ca5f497afc56f82 | /datasets/flowers.py | ed865e8b5d968ed02fc87f1c1cd0d06a01e91ad0 | [
"Apache-2.0"
] | permissive | Ningchen-Wang/DCGAN | fae14ae1bb6147caf600396d1689cc3877dacb37 | 4ba9fd4cdb89c809e90511427f85e88a589671be | refs/heads/master | 2021-08-10T11:09:44.577277 | 2017-11-12T13:36:22 | 2017-11-12T13:36:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,267 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the flowers dataset.
The dataset scripts used to create the dataset can be found at:
tensorflow/models/slim/datasets/download_and_convert_flowers.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from datasets import dataset_utils
slim = tf.contrib.slim
_FILE_PATTERN = 'flowers_%s_*.tfrecord'
SPLITS_TO_SIZES = {'train': 3320, 'validation': 350}
_NUM_CLASSES = 5
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying size.',
'label': 'A single integer between 0 and 4',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading flowers.
Args:
split_name: A train/validation split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/validation split.
"""
if split_name not in SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
'image/class/label': tf.FixedLenFeature(
[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
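# Usage sketch (assumes the flowers TFRecords were produced by
# download_and_convert_flowers.py into /tmp/flowers):
#
#   dataset = get_split('train', '/tmp/flowers')
#   provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
#   image, label = provider.get(['image', 'label'])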
| [
"[email protected]"
] | |
bded7a0abc4bf1dc4955561f7e0715bcba19006f | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /codeforces/cf326-350/cf334/b.py | 3d79209e1a77d7ad5f7c126cf1c70b802e0ece89 | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | # -*- coding: utf-8 -*-
import sys,copy,math,heapq,itertools as it,fractions,re,bisect,collections as coll
mod = 10**9 + 7
class UnionFind:
def __init__(self, size):
self.rank = [0] * size
self.par = range(size)
self.g_num = size
def find(self, x):
if x == self.par[x]: return x
self.par[x] = self.find(self.par[x])
return self.par[x]
def same(self, x, y):
return self.find(x) == self.find(y)
def unite(self, x, y):
x, y = self.find(x), self.find(y)
if x == y: return
self.g_num -= 1
if (self.rank[x] > self.rank[y]):
self.par[y] = x
else:
self.par[x] = y
if (self.rank[x] == self.rank[y]): self.rank[y] += 1
def group_num(self):
return self.g_num
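# Usage sketch (path compression in find, union by rank in unite):
#   uf = UnionFind(5)
#   uf.unite(0, 1); uf.unite(3, 4)
#   uf.same(0, 1)     # -> True
#   uf.group_num()    # -> 3, the components {0,1}, {2}, {3,4}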
#prime = [1] * 1000005
#prime[0] = prime[1] = 0
#for i in xrange(int(1000005**0.5) + 1):
# if prime[i]:
# prime[2*i::i] = [0] * len(prime[2*i::i])
p, k = map(int, raw_input().split())
if k == 0:
print pow(p, p - 1, mod)
exit()
uf = UnionFind(p)
cnt = 0
for x in xrange(p):
if x == k*x % p:
if k > 1:
cnt += 1
else:
uf.unite(x, k*x % p)
ans = pow(p, uf.group_num() - cnt, mod)
print ans
| [
"[email protected]"
] | |
cd65aa3b646cd5e825a104e8767b2dbe1068af20 | b741252e3677dd2e981d452a14b41b182ebac18b | /hexomap/virtualdiffractor.py | eaab8a5029aa9b9ea1c36e5495136a499c652b89 | [
"BSD-3-Clause"
] | permissive | KedoKudo/HEXOMAP | 01960559c2c88fc37962f966ed43b13b169bc90f | 81f1c200fe5a3ad035adf22e941e08588192d513 | refs/heads/master | 2020-04-29T05:41:27.724239 | 2019-08-26T19:06:24 | 2019-08-26T19:06:24 | 175,891,640 | 0 | 0 | null | 2019-03-15T21:10:54 | 2019-03-15T21:10:53 | null | UTF-8 | Python | false | false | 7,181 | py | #!/usr/bin/env python
"""
Module of components for virtual diffraction.
"""
import os
import yaml
import numpy as np
from dataclasses import dataclass
from itertools import product
from hexomap.orientation import Frame
from hexomap.npmath import norm
from hexomap.utility import iszero
# -- Define standard frames commonly used for NF/FF-HEDM --
STD_FRAMES = {
'APS': Frame(
e1=np.array([ 1, 0, 0]), # APS_X
e2=np.array([ 0, 1, 0]), # APS_Y
e3=np.array([ 0, 0, 1]), # APS_Z
o =np.array([ 0, 0, 0]), # rotation stage center
name='aps'
),
"Detector": Frame(
e1=np.array([-1, 0, 0]), # detector_j
e2=np.array([ 0,-1, 0]), # detector_k
e3=np.array([ 0, 0, 1]), # detector_n, norm
o =np.array([ 0, 0, 5]), # rotation stage center, assuming 5mm detector distance
name='detector_1'
),
}
# -- Define the materials data folder directory
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
MATS_DIR = os.path.join(DATA_DIR, 'materials')
@dataclass
class Detector:
frame: "Frame" = STD_FRAMES["Detector"]
resolution: tuple = (2048, 2048) # number of pixels
pix_size: tuple = (0.00148, 0.00148) # mm or m?
# Move
def transform_detector(self, m: np.ndarray) -> None:
"""
Description
-----------
Transfer detector frame using given transformation matrix.
Parameters
----------
m: np.ndarray, (4, 4)
Transformation matrix containing both translation and rotation
Returns
-------
None
"""
pass
    # IntersectionIdx
def acquire_signal(self,
scatter_vec: np.ndarray,
bragg_angle: float,
eta: float,
) -> tuple:
"""
Description
-----------
Parameters
----------
Returns
-------
"""
pass
# BackProj
def back_projection(self,
signal_position: tuple, # (J,K) in pixels
omega: float,
bragg_angle: float,
eta: float,
target_frame: "Frame"
) -> tuple:
"""
"""
pass
@dataclass
class Crystal:
name: str
atoms: list
atomz: list
lattice: str
lattice_constant: list
def __post_init__(self):
# construct the unit cell (prism) for given crystal
self.prism = Crystal.prism_from_lattice_constant(self.lattice_constant)
def structure_factor(self, hkl):
"""Calculate structure factor"""
return np.dot(self.atomz,
np.exp(-2*np.pi*1j*np.dot(np.array(self.atoms), np.array(hkl).reshape((3, 1)))),
)
def scatter_vecs(self, q_max: int) -> list:
"""Generate scattering vectors with Eward sphere capped at q_max"""
recip_prism = Crystal.prism_to_reciprocal(self.prism)
h_max, k_max, l_max = (q_max/norm(recip_prism, axis=0)).astype(int)
hkls = product(range(-h_max, h_max+1),
range(-k_max, k_max+1),
range(-l_max, l_max+1),
)
return [
np.dot(recip_prism, hkl)
for hkl in hkls
if not iszero(sum(map(abs, hkl))) # hkl != [000]
            if norm(hkl) <= q_max  # within Ewald sphere
if not iszero(self.structure_factor(hkl)) # non-vanishing
]
@staticmethod
def load(element:str, name: str) -> 'Crystal':
"""
Description
-----------
        Load material config for the given material from the materials database
Parameters
----------
element: str
main element, for example, titanium for Ti64
name: str
abbreviation for target material, for example Ti64 for Ti-6Al-4V
Returns
-------
Crystal
"""
with open(os.path.join(MATS_DIR, f"{element}.yml"), 'r') as f:
mat_dict = yaml.safe_load(f)['variants'][name]
return Crystal(
name,
[me['pos'] for me in mat_dict['atoms']],
[me['atomic_number'] for me in mat_dict['atoms']],
mat_dict['crystal_structure'],
[val for _, val in mat_dict['lattice_constant'].items()]
)
@staticmethod
def prism_from_lattice_constant(lattice_constant: list,
in_degrees=True,
) -> np.ndarray:
"""
Description
-----------
Calculate the unit cell prism expressed in crystal Frame
Parameters
----------
        lattice_constant: list
lattice constants for target crystal
in_degrees: bool
unit of alpha, beta, gamma in lattice constants
Returns
-------
np.ndarray
column-stacked base vectors for the unit cell prism expressed in
crystal frame
"""
a, b, c, alpha, beta, gamma = lattice_constant
if in_degrees:
alpha, beta, gamma = np.radians([alpha, beta, gamma])
# compute unit cell from lattice constants
# ref:
# https://github.com/KedoKudo/cyxtal/blob/master/documentation/dev/development.pdf
c_a, c_b, c_g = np.cos([alpha, beta, gamma])
s_g = np.sin(gamma)
factor = 1 + 2*c_a*c_b*c_g - c_a**2 - c_b**2 - c_g**2
v_cell = a*b*c*np.sqrt(factor)
v1 = [a, 0, 0]
v2 = [b*c_g, b*s_g, 0.0]
v3 = [c*c_b, c*(c_a-c_b*c_g)/(s_g), v_cell/(a*b*s_g)]
return np.column_stack((v1,v2,v3))
@staticmethod
def prism_to_reciprocal(prism: np.ndarray) -> np.ndarray:
"""
Description
-----------
        Calculate the reciprocal dual of the given prism (column stacked)
ref:
https://en.wikipedia.org/wiki/Reciprocal_lattice
Parameters
----------
prism: np.ndarray
unit cell prism
Returns
-------
np.ndarray
Reciprocal dual of the unit cell prism
NOTE:
            use pinv to avoid singular-matrix errors from an ill-conditioned problem
"""
return np.transpose(2*np.pi*np.linalg.pinv(prism))
# TODO:
# Finish the collector after the Detector and Crystal class refactor is complete
def collect_virtual_patterns(detector: 'Detector',
xtal: 'Crystal',
):
"""
Generate list of peaks (HEDM patterns) for given crystal(sample) on the target detector
"""
pass
if __name__ == "__main__":
# example_1:
xtal = Crystal.load('gold', 'gold_fcc')
print(xtal.prism)
print(Crystal.prism_to_reciprocal(xtal.prism))
print(norm(Crystal.prism_to_reciprocal(xtal.prism), axis=0))
print(xtal.scatter_vecs(3))
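    # example_2 (sketch): for a cubic lattice the reciprocal basis reduces to
    # (2*pi/a) * identity, a quick sanity check of prism_to_reciprocal.
    # The lattice constant below is illustrative, not taken from the data files.
    cubic = Crystal.prism_from_lattice_constant([4.05, 4.05, 4.05, 90, 90, 90])
    assert np.allclose(Crystal.prism_to_reciprocal(cubic), 2*np.pi/4.05*np.eye(3))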
| [
"[email protected]"
] | |
889f0a628633729df0d4a4bc22d12bf0d72f43b7 | 548e34ebef0904ad21efa3fd53a10b6af1011152 | /uva/10180 - geom/gen_data.py | 8b3c2f71c5cacf2a069b847041645d4e8e623ae2 | [] | no_license | eric7237cire/CodeJam | 1646f43ec34b87edf8029b839b01fb0d667758ce | ca9dd1ddad5c42a02731c3c7c8e4342bd5c9ee9d | refs/heads/master | 2021-06-08T17:47:53.204535 | 2018-01-01T15:34:29 | 2018-01-01T15:34:29 | 1,331,950 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | import sys
import random
import math
random.seed()
T = 30000
MAX = .1
print( str(T) )
for t in range(T):
r = random.random() * MAX
p = 0
list = []
while len(list) < 4:
x = -MAX + random.random() * MAX * 2
y = -MAX + random.random() * MAX * 2
if math.hypot(x, y) <= r + .00001:
continue
list.append(x)
list.append(y)
list.append(r)
print( " ".join([ "%.5f" % x for x in list ] ) )
| [
"[email protected]"
] | |
098998f8d95c610204722f8f0990286191492db1 | e9a0efee2089b1c3bf843633c7b226638bc09e0d | /DataStructures and Algorithms/Ammortization onArrays/CaesarCipher.py | 5bb577ea9668f61442f19fefda679a1816f4a8c4 | [
"MIT"
] | permissive | abhishekratnam/Datastructuresandalgorithmsinpython | 41226cf41d288e24dbe9cd9643650151cb2a1037 | 9339319f441755797f4d2818ac9cf742a63ab5ea | refs/heads/master | 2020-04-15T03:16:24.337787 | 2019-02-01T23:47:52 | 2019-02-01T23:47:52 | 164,342,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | class CaesarCipher:
"""Class for doing encryption and decryption using a Caesar Cipher."""
def __init__(self,shift):
"""Construct Caesar Cipher using given integer shift for rotation."""
encoder = [None] * 26
decoder = [None] * 26
for k in range(26):
encoder[k] = chr((k + shift)%26 + ord('A'))
decoder[k] = chr((k - shift)%26 + ord('A'))
self._forward = ''.join(encoder)
self._backward = ''.join(decoder)
def encrypt(self, message):
"""Return string representing encripted message."""
return self._transform(message, self._forward)
def decrypt(self, secret):
"""Returns the decrypted message with given secret."""
return self._transform(secret, self._backward)
def _transform(self, original, code):
"""Utility to perform transformation based on given code string."""
msg = list(original)
for k in range(len(msg)):
if msg[k].isupper():
j = ord(msg[k]) - ord('A')
msg[k] = code[j]
return ''.join(msg)
if __name__ == '__main__':
cipher = CaesarCipher(3)
message = "THE EAGLE IS IN PLAY; MEET AT JOE'S."
coded = cipher.encrypt(message)
print('Secret:', coded)
answer = cipher.decrypt(coded)
print('Message: ', answer)
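    # Wrap-around sketch (illustrative strings): with shift 3, 'XYZ' maps to
    # 'ABC' and back; _transform leaves non-uppercase characters untouched.
    assert cipher.encrypt("XYZ") == "ABC"
    assert cipher.decrypt("ABC") == "XYZ"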
| [
"[email protected]"
] | |
79445dc9be69e70168bbf832fc269c16f8377373 | c5859d1bdf44c8452563f856dc4191b74e85ce21 | /custom_components/image_processing/tagbox.py | 163ce385bf2c8182fd5f439a3f58b3d206199a0e | [] | no_license | balloob/homeassistant-config | 46774ea88ced4414e48e4f1f40af63ff67b6f990 | 9f341e4b695db56f3c4af7299a336d5a0f60cdcf | refs/heads/master | 2020-03-21T03:10:31.729526 | 2018-06-18T18:27:54 | 2018-06-18T18:27:54 | 138,039,924 | 11 | 0 | null | 2018-06-20T13:56:12 | 2018-06-20T13:56:12 | null | UTF-8 | Python | false | false | 4,157 | py | """
Component that will search images for tagged objects via a local
machinebox instance.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/image_processing.tagbox
"""
import base64
import requests
import logging
import time
import voluptuous as vol
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.components.image_processing import (
PLATFORM_SCHEMA, ImageProcessingEntity, CONF_SOURCE, CONF_ENTITY_ID,
CONF_NAME, DOMAIN)
_LOGGER = logging.getLogger(__name__)
CONF_ENDPOINT = 'endpoint'
CONF_TAGS = 'tags'
ROUNDING_DECIMALS = 2
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ENDPOINT): cv.string,
vol.Optional(CONF_TAGS, default=[]):
vol.All(cv.ensure_list, [cv.string]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the classifier."""
entities = []
for camera in config[CONF_SOURCE]:
entities.append(Tagbox(
camera.get(CONF_NAME),
config[CONF_ENDPOINT],
camera[CONF_ENTITY_ID],
config[CONF_TAGS],
))
add_devices(entities)
class Tagbox(ImageProcessingEntity):
"""Perform a tag search via a Tagbox."""
def __init__(self, name, endpoint, camera_entity, tags):
"""Init with the API key and model id"""
super().__init__()
if name: # Since name is optional.
self._name = name
else:
self._name = "Tagbox {0}".format(
split_entity_id(camera_entity)[1])
self._camera = camera_entity
self._default_tags = {tag: 0.0 for tag in tags}
self._tags = self._default_tags
self._url = "http://{}/tagbox/check".format(endpoint)
self._state = "no_processing_performed"
self._response_time = None
def process_image(self, image):
"""Process an image."""
timer_start = time.perf_counter()
try:
response = requests.post(
self._url,
json=self.encode_image(image)
).json()
        except requests.exceptions.RequestException:
            response = {'success': False}
if response['success']:
elapsed_time = time.perf_counter() - timer_start
self._response_time = round(elapsed_time, ROUNDING_DECIMALS)
self._tags, self._state = self.process_response(response)
else:
self._state = "Request_failed"
self._tags = self._default_tags
def encode_image(self, image):
"""base64 encode an image stream."""
base64_img = base64.b64encode(image).decode('ascii')
return {"base64": base64_img}
def process_response(self, response):
"""Process response data, returning the processed tags and state."""
tags = self._default_tags.copy()
tags.update(self.process_tags(response['tags']))
if response['custom_tags']:
tags.update(self.process_tags(response['custom_tags']))
        # max() raises ValueError when the tags dict is empty (no default
        # tags configured and none returned by the box)
        try:
            state = max(tags.keys(), key=(lambda k: tags[k]))
        except ValueError:
            state = "No_tags_identified"
return tags, state
def process_tags(self, tags_data):
"""Process tags data, returning the tag and rounded confidence."""
processed_tags = {
tag['tag'].lower(): round(tag['confidence'], ROUNDING_DECIMALS)
for tag in tags_data
}
return processed_tags
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera
@property
def device_state_attributes(self):
"""Return other details about the sensor state."""
attr = self._tags.copy()
attr.update({'response_time': self._response_time})
return attr
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def name(self):
"""Return the name of the sensor."""
return self._name
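
# A minimal sketch of the response shape process_response expects. The
# endpoint, entity id, tag name and confidence below are hypothetical, and the
# helper is illustrative only -- it is not called by Home Assistant.
def _demo_process_response():
    box = Tagbox('demo', 'localhost:8080', 'camera.demo', ['dog'])
    fake = {'tags': [{'tag': 'Dog', 'confidence': 0.91}], 'custom_tags': []}
    tags, state = box.process_response(fake)
    return tags, state  # -> ({'dog': 0.91}, 'dog')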
| [
"[email protected]"
] | |
6e40ec6f6b3b14aa33b9e1e5a07f218ba7ee36e0 | 00d2f3fde2c3d9e03a1babc958e35285d5798352 | /removedependent.py | 626bf7416873208dd75191cd10f065def3a4c318 | [] | no_license | N-S-Krishnan/Database-GUI-Elmasri-and-Navathe- | 14043e90c2e25e6c5ab080cc5efe985731479b93 | f8a60edad75505ad0587f3a3562cfc14cc0d018f | refs/heads/main | 2023-04-22T07:34:54.141788 | 2021-04-26T01:07:05 | 2021-04-26T01:07:05 | 361,572,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,474 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'removedependent.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QTableWidgetItem
import mysql.connector
from PyQt5.QtCore import QRegExp
from PyQt5.QtGui import QRegExpValidator
class Ui_RemoveDependent(object):
passedssn = -1
deldepname = ""
db = None # mysql connection
def __init__(self, obj):
self.passedssn = obj.textEdit.text()
def setupUi(self, RemoveDependent):
RemoveDependent.setObjectName("RemoveDependent")
RemoveDependent.resize(700, 505)
self.buttonBox = QtWidgets.QDialogButtonBox(RemoveDependent)
self.buttonBox.setGeometry(QtCore.QRect(310, 420, 321, 31))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayoutWidget = QtWidgets.QWidget(RemoveDependent)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(30, 80, 641, 201))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.tabdependents = QtWidgets.QTableWidget(self.verticalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabdependents.sizePolicy().hasHeightForWidth())
self.tabdependents.setSizePolicy(sizePolicy)
self.tabdependents.setMinimumSize(QtCore.QSize(639, 0))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.tabdependents.setFont(font)
self.tabdependents.setAutoFillBackground(True)
self.tabdependents.setGridStyle(QtCore.Qt.SolidLine)
self.tabdependents.setRowCount(10)
self.tabdependents.setColumnCount(4)
self.tabdependents.setObjectName("tabdependents")
item = QtWidgets.QTableWidgetItem()
self.tabdependents.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tabdependents.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tabdependents.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tabdependents.setHorizontalHeaderItem(3, item)
self.tabdependents.horizontalHeader().setSortIndicatorShown(False)
self.verticalLayout.addWidget(self.tabdependents)
self.label_2 = QtWidgets.QLabel(RemoveDependent)
self.label_2.setGeometry(QtCore.QRect(30, 360, 161, 16))
self.label_2.setObjectName("label_2")
self.empssn = QtWidgets.QLineEdit(RemoveDependent)
self.empssn.setGeometry(QtCore.QRect(90, 20, 101, 31))
self.empssn.setObjectName("empssn")
self.gobutton = QtWidgets.QPushButton(RemoveDependent)
self.gobutton.setGeometry(QtCore.QRect(40, 420, 93, 28))
self.gobutton.setObjectName("gobutton")
self.dname = QtWidgets.QTextEdit(RemoveDependent)
self.dname.setGeometry(QtCore.QRect(230, 350, 271, 31))
self.dname.setObjectName("dname")
self.label = QtWidgets.QLabel(RemoveDependent)
self.label.setGeometry(QtCore.QRect(30, 20, 71, 21))
self.label.setObjectName("label")
self.empname = QtWidgets.QLabel(RemoveDependent)
self.empname.setGeometry(QtCore.QRect(240, 20, 71, 21))
self.empname.setObjectName("empname")
self.empname_2 = QtWidgets.QTextEdit(RemoveDependent)
self.empname_2.setGeometry(QtCore.QRect(310, 20, 261, 31))
self.empname_2.setObjectName("empname_2")
self.label_6 = QtWidgets.QLabel(RemoveDependent)
self.label_6.setGeometry(QtCore.QRect(30, 310, 121, 16))
self.label_6.setObjectName("label_6")
self.depcount = QtWidgets.QTextEdit(RemoveDependent)
self.depcount.setGeometry(QtCore.QRect(210, 300, 31, 31))
self.depcount.setObjectName("depcount")
self.retranslateUi(RemoveDependent)
self.buttonBox.rejected.connect(RemoveDependent.reject)
QtCore.QMetaObject.connectSlotsByName(RemoveDependent)
self.empssn.setText(self.passedssn)
self.empssn.setDisabled(True)
self.select_data()
self.tabdependents.clicked.connect(self.select_depname)
self.gobutton.clicked.connect(self.processdelete)
def retranslateUi(self, RemoveDependent):
_translate = QtCore.QCoreApplication.translate
RemoveDependent.setWindowTitle(_translate("RemoveDependent", "RemoveDependent"))
self.tabdependents.setSortingEnabled(True)
item = self.tabdependents.horizontalHeaderItem(0)
item.setText(_translate("RemoveDependent", "Name"))
item = self.tabdependents.horizontalHeaderItem(1)
item.setText(_translate("RemoveDependent", "Sex"))
item = self.tabdependents.horizontalHeaderItem(2)
item.setText(_translate("RemoveDependent", "Date of Birth"))
item = self.tabdependents.horizontalHeaderItem(3)
item.setText(_translate("RemoveDependent", "Relationship"))
self.label_2.setText(_translate("RemoveDependent", "Name to Delete:"))
self.gobutton.setText(_translate("RemoveDependent", "Delete"))
self.label.setText(_translate("RemoveDependent", "Emp SSN"))
self.empname.setText(_translate("RemoveDependent", "Emp Name"))
self.label_6.setText(_translate("RemoveDependent", "Dependent Count"))
def select_data(self):
# Retrieve data on existing dependents that correspond to an ssn value given
try:
#print("select dependents")
self.db = mysql.connector.connect(option_files='mydb.conf')
cursor = self.db.cursor()
cursor.execute("select concat(fname, ' ', minit,' ', lname) empname from employee where ssn =" + str(self.passedssn) +
" for update ")
for row in cursor:
self.empname_2.setText(row[0])
#print(row)
self.empname_2.setDisabled(True)
nrows = cursor.rowcount
#print('nrows', nrows)
if nrows <= 0 :
msg = QtWidgets.QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("No employee with ssn "+ str(self.passedssn))
msg.setWindowTitle("Add Dependent")
msg.exec()
self.reject()
cursor.execute("select dependent_name, sex, bdate, relationship from dependent where essn =" + str(self.passedssn) +
" for update ")
result = cursor.fetchall()
nrows = cursor.rowcount
self.depcount.setText(str(nrows))
self.depcount.setDisabled(True)
self.tabdependents.setRowCount(0)
for rnum, rdata in enumerate(result):
self.tabdependents.insertRow(rnum)
for colnum, cdata in enumerate(rdata):
self.tabdependents.setItem(rnum, colnum,QTableWidgetItem(str(cdata)))
#self.tabdependents.setDisabled(True)
except mysql.connector.Error as e:
msg = QtWidgets.QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("SQL Error "+ str(e.msg))
msg.setWindowTitle("Add Dependent")
msg.exec()
def select_depname(self, item):
cellContent = item.data()
#print(cellContent) # test
#sf = "You clicked on {} {}".format(item.row(), item.column())
#print(sf)
myrow = item.row()
mycol = item.column()
if mycol == 0:
self.dname.setText(cellContent)
self.deldepname = cellContent
def processdelete(self, item):
if self.dname != "":
self.db = mysql.connector.connect(option_files='mydb.conf')
cursor = self.db.cursor()
# The number of variables we pass to the delete query is small enough where we can place them directly into
# the string that forms the sql query
cursor.execute("delete from dependent where essn =" + str(self.passedssn)+" and dependent_name = '"+self.deldepname+"'")
self.db.commit()
msg = QtWidgets.QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("Deleted dependent with essn "+ str(self.passedssn) + " dep name '" + self.deldepname +"'")
msg.setWindowTitle("Delete Dependent")
msg.exec()
self.dname.setText("")
self.select_data()
    def reject(self):
        # commit pending changes and release the connection before closing
        self.db.commit()
        self.db.close()
        QtWidgets.QDialog.reject(self)
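
# Sketch of a parameterized alternative to the string-built DELETE in
# processdelete above; table and column names are as in this file, and the
# helper is illustrative only (the UI class does not call it).
def delete_dependent(db, essn, dependent_name):
    cursor = db.cursor()
    cursor.execute(
        "delete from dependent where essn = %s and dependent_name = %s",
        (essn, dependent_name))
    db.commit()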
| [
"[email protected]"
] | |
f4d0fbd3015939c5f1fbedeb7e90834ae6473193 | b02a2c1e8cf778f8f810897c478abcec720b7220 | /ds_algos_primer/python/arrays_and_strings_solutions.py | a70325060b6be3c328ac7c8980a14ffffa4407b0 | [] | no_license | naraekwon/CodingInterviewMastery | d8596a4decb52086ea2eefa32ebccd4a25c6181a | c14ceaa19649269467160a5bf53e4a3d927e97a5 | refs/heads/main | 2023-09-05T16:28:25.253482 | 2021-11-04T18:56:19 | 2021-11-04T18:56:19 | 428,470,905 | 0 | 0 | null | 2022-02-13T19:54:09 | 2021-11-16T00:58:46 | null | UTF-8 | Python | false | false | 15,724 | py | """
Title: Arrays and Strings Solutions
This file contains the solutions for the Arrays and Strings exercises in
the DS & Algos Primer. If you have not already attempted these exercises,
we highly recommend you complete them before reviewing the solutions here.
Execution: python arrays_and_strings_solutions.py
*** IMPORTANT NOTE ***
Python provides a lot of inbuilt functions to accomplish certain tasks. If you
are aware of these, that's great.
HOWEVER, the goal of these exercises is to understand these data structures.
Therefore, you are discouraged from writing one- to two-line functions. Instead
you will learn a lot more by implementing these things manually.
In your interview, you may want to use these inbuilt functions, but while
learning, you will learn more by doing things the hard way.
"""
from collections import Counter
from typing import List
"""
Exercise 1.1: Write a function that takes an integer array and reverses
the values in place
Time Complexity: O(len(arr))
Space Complexity: O(1)
"""
def reverse_array(arr: List[int]):
# We will iterate to the midpoint of the array. For each value, we can
# get the index its supposed to swap with by computing arr.length-i-1
for i in range(len(arr)//2):
temp = arr[i]
arr[i] = arr[len(arr)-i-1]
arr[len(arr)-i-1] = temp
"""
Exercise 1.2: Given a 2D matrix, write a function to print the values in
the matrix in a clockwise spiral from outside to inside
Time Complexity: O(len(arr) * len(arr[0]))
Space Complexity: O(1)
"""
def print_spiral(arr: List[List[int]]):
# We need to keep track of the boundaries of the current layer of the
# spiral that we are traversing
min_row = 0
min_col = 0
max_row = len(arr)-1
max_col = len(arr[0])-1
# Once the mins and maxes converge, we are at the center of the spiral.
# The spiral follows a fixed set of steps. We go left, down, right, up.
# For each of these, we just interate to the bounds, so we express each
# one explicitly.
while min_row < max_row and min_col < max_col:
# Go across the top
for col in range(min_col, max_col+1):
print(arr[min_row][col])
min_row = min_row+1
# Go down the right side
for row in range(min_row, max_row+1):
print(arr[row][max_col])
max_col = max_col-1
# Go across the bottom
for col in range(max_col, min_col-1, -1):
print(arr[max_row][col])
max_row = max_row-1
# Go up the left side
for row in range(max_row, min_row-1, -1):
print(arr[row][min_col])
min_col = min_col+1
"""
Exercise 1.3: Given a 2D matrix, write a function to print the values in the
matrix in a zig-zag order
Time Complexity: O(len(arr) * len(arr[0]))
Space Complexity: O(1)
"""
def print_diagonals(arr: List[List[int]]):
row = 0
col = 0
# Like the spiral, we have clearly defined directions we need to go. In
# this case we either go up to the right or down to the left. We define
# each of these explicitly and just go back and forth between doing one
# and the other
while True:
# Go up to the right
while row > 0 and col < len(arr[0])-1:
print(arr[row][col])
row = row-1
col = col+1
# Without this we won't print the final value in the diagonal
print(arr[row][col])
# Check whether we're at the botom right corner
if row == len(arr)-1 and col == len(arr[0])-1:
break
# We need to update our positiion differently depending on whether
# we're still going along the top of the matrix or down the
# righthand side
elif col+1 < len(arr[0]):
col = col+1
else:
row = row+1
# Go down to the left
while row < len(arr)-1 and col > 0:
print(arr[row][col])
row = row+1
col = col-1
# Without this we won't print the final value in the diagonal
print(arr[row][col])
# Check whether we're at the botom right corner
if row == len(arr)-1 and col == len(arr[0])-1:
break
# Are we going along the lefthand side or the bottom?
elif row+1 < len(arr):
row = row+1
else:
col = col+1
"""
Exercise 1.4: Write a function that takes in a string and removes every
even-indexed character
Time Complexity: O(len(s))
Space Complexity: O(len(s))
"""
def remove_even(s: str) -> str:
# Build the string as a list first and then join everything together
result = []
# Increment by 2 each time to only visit odd indices
for i in range(1, len(s), 2):
result.append(s[i])
return ''.join(result)
"""
Exercises 1.5: Zig Zag Conversion
Full Problem Definition: https://leetcode.com/problems/zigzag-conversion/
Time Complexity: O(len(s))
Space Complexity: O(len(s))
"""
def zig_zag(s: str, num_rows: int) -> str:
# Compute each row and then merge them at the end
rows = [ [] for _ in range(num_rows)]
# We have 2 actions. First we iterate down over each row, then we iterate
# back up. Do one then the other
idx = 0
while idx < len(s):
# Iterate from row 0 to num_rows-1
i = 0
while i < len(rows) and idx < len(s):
rows[i].append(s[idx])
idx = idx+1
i = i+1
# Iterate back up from numRows-2 to 1. Make sure we go from numRows-2 to
# 1 and not numRows-1 to 0 because otherwise we'll add 2 characters to
# row 0 and 2 characters to row numRows-1
i = len(rows)-2
while i >= 1 and idx < len(s):
rows[i].append(s[idx])
idx = idx+1
i = i-1
# Combine everything together
result = []
for row in rows:
result.append(''.join(row))
return ''.join(result)
"""
Exercise 2.1: Given a string, print out all of the substrings
Time Complexity: O(len(s)^2)
Space Complexity: O(1)
"""
def print_substrings(s: str):
for i in range(len(s)):
for j in range(i+1, len(s)+1):
print(s[i:j])
"""
Exercise 2.2: Write a function to find all duplicates in an array. The array
will contain exactly 1 duplicated value
Time Complexity: O(len(arr)^2)
Space Complexity: O(1)
"""
def find_duplicates(arr: List[int]) -> int:
# Use 2 pointers to compare each pair of values
for i in range(len(arr)):
for j in range(i+1, len(arr)):
if arr[i] == arr[j]:
return arr[i]
"""
Exercise 2.3: Given a sorted array, find every pair of values in the
array that sum up to a given target
Time Complexity: O(len(arr))
Space Complexity: O(1)
"""
def two_sum(arr: List[int], target: int) -> List[List[int]]:
result = []
# We start our pointers at the beginning and move towards the center
i = 0
j = len(arr)-1
while i < j:
sum = arr[i] + arr[j]
# If we found the target, we add it to the result. Then we either
# increment i or decrement j. It doesn't matter which we do
if sum == target:
result.append([arr[i],arr[j]])
# We want to avoid including the same pair multiple times so we
# skip the pointer ahead to the next unique value. Since our
# array is sorted, we just keep incrementing until we see a
# new value
while arr[i] == arr[i+1]:
i = i+1
i = i+1
# We can find a larger sum by incrementing i. This makes the smaller
# value in our pair larger so the sum is larger
if sum < target:
i = i+1
# If it's too big, we do the opposite by decrementing j
if sum > target:
j = j-1
return result
"""
Exercise 3.1: Given two arrays, compare them to see if they are equal
Time Complexity: O(len(arr1))
Space Complexity: O(1)
"""
def arrays_are_equal(arr1: List[int], arr2: List[int]) -> bool:
# If they're not the same length they can't be equal
if len(arr1) != len(arr2):
return False
# Compare each value. If they're not equal then the arrays are unequal
for i in range(len(arr1)):
if arr1[i] != arr2[i]:
return False
return True
"""
Exercise 3.2: Given two strings, determine if one string is the reverse of the
other string
Time Complexity: O(len(s1))
Space Complexity: O(1)
"""
def strings_are_opposite(s1: str, s2: str) -> bool:
# If they're not the same length they can't be opposites
if len(s1) != len(s2):
return False
# Compare the opposing characters in each string. We could also just
# reverse one of the strings and compare them, but that takes extra
# space whereas this does not
for i in range(len(s1)):
if s1[i] != s2[len(s2)-i-1]:
return False
return True
"""
Exercise 3.3: Given two strings, determine whether they are anagrams of
each other
Time Complexity: O(len(s1))
Space Complexity: O(len(s1))
"""
def are_anagrams(s1: str, s2: str) -> bool:
# If they're not the same length they can't be anagrams
if len(s1) != len(s2):
return False
# Count the number of occurrences of each character in s1
chars = {}
for c in s1:
chars[c] = chars.get(c, 0) + 1
# Subtract the chars in s2 from the count. We should end up with 0 of
# each character left over
for c in s2:
# s1 doesn't contain c at all
if c not in chars:
return False
# s1 contains fewer occurrences of c than s2
chars[c] = chars[c]-1
if chars[c] < 0:
return False
return True
"""
Exercise 4.1: Given an array, compute the sum of each length-k subarray
Time Complexity: O(len(arr))
Space Complexity: O(1)
"""
def subarray_sums(arr: List[int], k: int) -> List[int]:
result = []
# Compute the sum of the initial length-k subarray
sum = 0
for i in range(k):
sum = sum + arr[i]
result.append(sum)
# Use a sliding window to go through the remainder of the array without
# recomputing the sum for every subarray
left = 0
right = k-1
while right < len(arr)-1:
# The value at right+1 needs to be added to the sum and the value
# at left needs to be subtracted
right = right+1
sum = sum + arr[right]
sum = sum - arr[left]
left = left + 1
result.append(sum)
return result
"""
Exercise 4.2: Given a string, find the longest substring of the string that does
not contain any repeated characters
Time Complexity: O(len(s))
Space Complexity: O(1)
"""
def no_repeated_chars(s: str) -> int:
# Track the characters in our current substring
in_substring = set()
max_substring = 0
left = 0
right = 0
# We expand right out as much as we can without getting duplicate chars. If
# we end up with duplicates, we increment left to shrink the substring until
# we no longer have duplicates
while right < len(s):
# We have a duplicate character, so increment left until the substring
# no longer contains duplicates
while s[right] in in_substring:
in_substring.remove(s[left])
left = left + 1
# We have a valid substring so is it the longest one?
max_substring = max(max_substring, right-left+1)
# Try expanding the substring again
in_substring.add(s[right])
right = right+1
return max_substring
"""
Exercise 4.3: Given two strings, s and p, find all occurrences of anagrams of p
in s. The output is the starting index of each anagram
Time Complexity: O(len(s))
Space Complexity: O(1)
"""
def find_all_anagrams(s: str, p: str) -> List[int]:
result = []
# This is another option for computing character counts instead of a dict
# since we know they're lowercase English chars. This is a little easier
# given the approach below than using a dict
chars = [0]*256
for c in p:
chars[ord(c)] = chars[ord(c)] + 1
# Do our sliding window
left = 0
right = 0
while right < len(s):
# Add in the right character to our current window. We account for this
# by removing it from the character count we have for p
right_char_ord = ord(s[right])
right = right + 1
chars[right_char_ord] = chars[right_char_ord] - 1
# If the value is negative, then we have too many of rightChar in our
# substring so we need to make it smaller until we no longer have too
# many of that character
while chars[right_char_ord] < 0:
chars[ord(s[left])] = chars[ord(s[left])] + 1
left = left + 1
# If we have the exact right number of occurrences of the character AND
# the substring is the right length, then this is a valid substring
if chars[right_char_ord] == 0 and right-left == len(p):
result.append(left)
return result
"""
Exercise 4.4: Given two strings, s and p, find the smallest substring of s that
contains all the characters in p
Time Complexity: O(len(s))
Space Complexity: O(1)
"""
def smallest_substring(s: str, p: str) -> str:
# Same as 4.3, we use an array to store character count
chars = [0]*256
for c in p:
chars[ord(c)] = chars[ord(c)] + 1
left = 0
right = 0
# In addition to tracking left and right, we'll track the start and length
# of the string, as well as the count of characters from p that we have in
# our substring. The count allows us to quickly see whether our substring
# includes all the characters in p or not
count = 0
min_length = float('inf')
min_start = 0
while right < len(s):
# This is basically opposite of 4.3 where we WANT all the values to get
# to 0 or negative because we want the string to be inclusive of all the
# characters in p
right_char_ord = ord(s[right])
right = right + 1
chars[right_char_ord] = chars[right_char_ord] - 1
if chars[right_char_ord] >= 0:
count = count + 1
# If count == p.length we have a valid substring. In this case, keep
# shrinking it as much as we can by incrementing left
while count == len(p):
if right - left < min_length:
min_length = right - left
min_start = left
# If we have extra of a character, we don't decrement the count
# until we have fewer occurrences of that char than there are in p
left_char_ord = ord(s[left])
chars[left_char_ord] = chars[left_char_ord] + 1
if chars[left_char_ord] > 0:
count = count + 1
left = left + 1
# If we don't find a valid substring, return ""
if (min_length > len(s)):
return ""
return s[min_start : min_start + min_length]
# Sample test cases
if __name__ == '__main__':
l = [1,2,3,4]
reverse_array(l)
print(l)
matrix = [[1,2,3,4,5],[6,7,8,9,10],[11,12,13,14,15],[16, 17,18,19,20]]
print_spiral(matrix)
print_diagonals(matrix)
print(remove_even("abcdef"))
print(zig_zag("PAYPALISHIRING", 3))
print_substrings("abcde")
print(find_duplicates([1,2,3,4,3,5]))
print(two_sum([1,2,2,2,3,4,5,6,6,6], 8))
print(arrays_are_equal([1,2,3,4], [1,2,3,4]))
print(strings_are_opposite("abcd", "dcba"))
print(are_anagrams("abcd", "cdab"))
print(subarray_sums([1,2,3,4,5], 3))
print(no_repeated_chars("abcdbea"))
print(find_all_anagrams("cbaebabacd", "abc"))
print(smallest_substring("aabbccdd", "abc"))
| [
"[email protected]"
] | |
92f03d5304cd9df07112f72bed3d35156851705e | f80f7529a68c168fd19dda179ad5ea762a7faa90 | /alembic/versions/ec62bcefe99d_create_categories_table.py | 6205d6232105826d204aa36c96074971f3eba8f4 | [
"MIT"
] | permissive | luisignaciocc/python-books-scraper | 76b9509444ca52af65411ca821ea9ffd8ff23a03 | a8dba19c1fd49a746711235219eb528a9743d108 | refs/heads/master | 2023-05-06T07:07:03.641242 | 2021-05-26T13:49:19 | 2021-05-26T13:49:19 | 349,744,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | """Create categories table
Revision ID: ec62bcefe99d
Revises:
Create Date: 2021-03-28 14:19:59.387880
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ec62bcefe99d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('categories',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=False),
sa.Column('url', sa.String(length=100), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name'),
sa.UniqueConstraint('url')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('categories')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
9f173ba385b704082ea7dd42c3c5fcdda3c25bb0 | 8e6c4def374ba21c934f6856c0333a1e8bff69db | /190415/randomTest2.py | 059ba08f9ea23984706e9c2cf9d8f94c9a6c3e4f | [] | no_license | inuse918/Python_Practice_2 | d5a930a95b51181330abc6601d80f71b67780740 | de4dd6ec8d96e9d259566916b9e7f08402e7917d | refs/heads/master | 2020-05-06T13:20:08.153295 | 2019-12-25T23:07:47 | 2019-12-25T23:07:47 | 180,128,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | import random
time=random.randint(1,24)
sunny=random.choice([True,False])
print("지금 시각은 ",str(time),"시 입니다.")
#if (sunny==True)and((time>=6)and(time<=9)):
# print("현재 날씨는 화창합니다.")
if sunny:
print ("현재 날씨는 화창합니다.")
else :
print("현재 날씨는 화창하지 않습니다.")
# 종달새가 노래를 할 것인지를 판단해보자
if time>=6 and time<9 and sunny:
print("종달새가 노래를 합니다.")
else:
print("종달새가 노래를 하지 않습니다.")
| [
"[email protected]"
] | |
69ef378642a90c904e60bcd86fa6932e967ed311 | 032117bbf248a76abd25fcc2355bc8ade84fa76a | /inheritance_4.py | b62203cddf2bf1a42b3576a58752aaab34cfb71a | [] | no_license | shefaligoel136/python_summer_training | ba8f28f6af008584b4239c73d466e4e9d35b4b01 | 0b97fea050342fe4ed95b18c5f7ed885a6c8ca23 | refs/heads/master | 2022-11-13T07:22:32.855717 | 2020-07-06T08:33:19 | 2020-07-06T08:33:19 | 277,480,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # using super
class a:
def __init__(self):
print("initof A")
def feature1(self):
print("feature 1 is working")
def feature2(self):
print("feature 2 is working")
class b(a):
def __init__(self):
super().__init__()
print("initof B")
def feature3(self):
print("feature 3 is working")
def feature4(self):
print("feature 4 is working")
k = b()
k.feature1() | [
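
# Method resolution order sketch: super() in b.__init__ follows the MRO below,
# which is why "initof A" prints before "initof B".
print(b.__mro__)  # -> b, a, object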
"[email protected]"
] | |
5c0d30018cbe2c3ef11519938d2dcc3bbcfa328b | 267ab87884d6c74f8d676c1b6cfebf7e217e2ea7 | /index/views.py | 79a1320fcddf6b714ccc0465ccd2299e1bfd4d22 | [] | no_license | Emehinola/charlotte | 0d564181de1f5419a67c06e7dba5cd81796cb1aa | c3175757f5ce7d3ceab272dad9a866c4bea4bd1d | refs/heads/master | 2023-04-23T00:38:18.965089 | 2021-04-30T19:34:17 | 2021-04-30T19:34:17 | 363,119,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | from django.shortcuts import render
from django.views import generic
from blog.models import Article, categories
# Create your views here.
class Home(generic.ListView):
model = Article
paginate_by = 30
template_name = 'index/home.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'must_read': Article.objects.filter(must_read=True)[:5],
            'articles': Article.objects.all(),
            'categories': get_category(),
        })
        return context
def get_category(): # return a list of blog categories
raw = []
readable = []
for i in categories:
raw.append(i[0]) # gets the first item of the list of tuples
readable.append(i[1]) # gets the second item of the list of tuples
output = zip(raw, readable)
return output
| [
"[email protected]"
] | |
4f763a66d6c6077358c6dadee57b52bddcadf918 | 2a9572e6f1cfb329a12d6835071483ec89ec6538 | /flask_test/flask_blog.py | 1ceec00beaf74f9b274f237f1860dfff21615f7f | [] | no_license | Cezar04/petproject | c8a4c810a8b05d0645dc36601539034dc35be6b5 | 9093d2435f779235db5f9e79417395e4dd13e8b0 | refs/heads/master | 2022-11-13T11:12:29.785362 | 2020-07-03T11:14:25 | 2020-07-03T11:14:25 | 276,875,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | from flask import Flask, render_template, url_for, flash, redirect, request
from forms import registration_form, login_form, post_form
import data_manager
app = Flask(__name__)
app.config['SECRET_KEY'] = 'haker'
posts = [
{"author":"Gigel",
"title": "blog post 1",
"content":"First post content",
"date_posted": "marite 200001"},
{"author":"Gina gaina",
"title": "blog post 2",
"content":"First post content",
"date_posted": "marite 202"}
]
@app.route('/')
@app.route('/home')
def home():
return render_template('home.html', posts=posts)
@app.route('/about')
def about():
return render_template("about.html")
@app.route('/register', methods=["GET", "POST"])
def register():
form = registration_form()
if form.validate_on_submit():
flash(f"Account created for {form.username.data}!", "success")
return redirect(url_for("home"))
return render_template("register.html", title="Register", form=form)
@app.route('/login', methods=["GET", "POST"])
def login():
form = login_form()
if form.validate_on_submit():
if form.email.data == "[email protected]" and form.password.data == "1234":
flash('You are logged in!', 'success')
return redirect(url_for('home'))
else:
flash("Login failed, check username and password", 'danger')
return render_template("login.html", title="Login", form=form)
@app.route('/post/new', methods=["GET", "POST"])
def new_post():
form = post_form()
if form.validate_on_submit():
# post = posts(title=form.title.data, author=form.content.data, content=form.content.data)
flash("Post Created", "success")
return redirect(url_for("home"))
return render_template("create_post.html", title="New Post", form=form)
if __name__ == "__main__":
app.run(debug=True)
| [
"[email protected]"
] | |
2c003bc352ae9a2de24edeacf73478e603742dce | d27e62cb69beddbb52c63be4a7a1f54cf258ba67 | /services/users/project/api/models.py | 548b3d7c296c65573824fe23d1bb24b316ab824c | [] | no_license | gavinest/testdriven-app | 7f7f04278f148e42d30fa3b33b84c63bde0888ed | 44d83fc087e2adedeba48adbe739875d427a1de2 | refs/heads/master | 2020-03-29T18:17:21.734301 | 2018-10-24T12:59:35 | 2018-10-24T12:59:35 | 140,979,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,389 | py | import datetime
from flask import current_app
from sqlalchemy.sql import func
import jwt
from project import db, bcrypt
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(128), unique=True, nullable=False)
email = db.Column(db.String(128), unique=True, nullable=False)
password = db.Column(db.String(255), nullable=False)
active = db.Column(db.Boolean(), default=True, nullable=False)
created_date = db.Column(db.DateTime, default=func.now(), nullable=False)
admin = db.Column(db.Boolean, default=False, nullable=False)
def __init__(self, username, email, password, admin=False):
self.username = username
self.email = email
self.password = bcrypt.generate_password_hash(password).decode()
self.admin = admin
def to_json(self):
return {
'id': self.id,
'username': self.username,
'email': self.email,
'active': self.active,
'admin': self.admin
}
def encode_auth_token(self, user_id):
'''generates the auth token'''
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(
days=current_app.config.get('TOKEN_EXPIRATION_DAYS'),
seconds=current_app.config.get('TOKEN_EXPIRATION_SECONDS')
),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(
payload,
current_app.config.get('SECRET_KEY'),
algorithm='HS256'
)
except Exception as e:
return e
@staticmethod
def decode_auth_token(auth_token):
'''
decodes the auth token - :param auth token: - :return: integer|string
'''
try:
payload = jwt.decode(
auth_token, current_app.config.get('SECRET_KEY'))
return payload['sub']
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
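
# A minimal sketch of the token round trip above. `app` is assumed to be a
# Flask app configured with SECRET_KEY, TOKEN_EXPIRATION_DAYS and
# TOKEN_EXPIRATION_SECONDS; the username, email and password are illustrative.
def _demo_token_roundtrip(app):
    with app.app_context():
        user = User(username='demo', email='demo@example.com', password='secret')
        token = user.encode_auth_token(user_id=1)
        return User.decode_auth_token(token)  # -> 1 while the token is fresh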
| [
"[email protected]"
] | |
de3fe45a87e82c646b0708bb94ef18a5f539f842 | 4d675034878c4b6510e1b45b856cc0a71af7f886 | /mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py | 06c1de2b9010fef13bd2322bbd3352d82a1f3e2f | [
"Apache-2.0",
"BSD-2-Clause-Views",
"MIT",
"BSD-2-Clause"
] | permissive | shinya7y/UniverseNet | 101ebc2ad8f15482ee45ea8d6561aa338a0fa49e | 3652b18c7ce68122dae7a32670624727d50e0914 | refs/heads/master | 2023-07-22T08:25:42.646911 | 2023-07-08T18:09:34 | 2023-07-08T18:09:34 | 263,555,721 | 407 | 58 | Apache-2.0 | 2023-01-27T01:13:31 | 2020-05-13T07:23:43 | Python | UTF-8 | Python | false | false | 4,482 | py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
init_cfg=None,
**kwargs):
super(HeuristicFusionHead,
self).__init__(num_things_classes, num_stuff_classes, test_cfg,
None, init_cfg, **kwargs)
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
"""Lay instance masks to a result map.
Args:
bboxes: The bboxes results, (K, 4).
labels: The labels of bboxes, (K, ).
masks: The instance masks, (K, H, W).
overlap_thr: Threshold to determine whether two masks overlap.
default: 0.5.
Returns:
Tensor: The result map, (H, W).
"""
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
scores, bboxes = bboxes[:, -1], bboxes[:, :4]
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
**kwargs):
"""Fuse the results of instance and semantic segmentations.
Args:
det_bboxes: The bboxes results, (K, 4).
det_labels: The labels of bboxes, (K,).
mask_preds: The masks results, (K, H, W).
seg_preds: The semantic segmentation results,
(K, num_stuff + 1, H, W).
Returns:
Tensor : The panoptic segmentation result, (H, W).
"""
mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(det_labels.shape[0]):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
return pan_results
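
# Illustrative torch-only sketch of the overlap test in _lay_masks: a candidate
# instance mask is skipped when intersect / area exceeds overlap_thr (the mask
# values below are made up for the demonstration).
def _demo_overlap_ratio(overlap_thr=0.5):
    pasted = torch.zeros(4, 4, dtype=torch.bool)
    pasted[:2, :2] = True  # region already claimed by higher-scoring instances
    mask = torch.zeros(4, 4, dtype=torch.bool)
    mask[:2, :3] = True  # candidate mask overlapping the claimed region
    area = mask.sum()
    intersect = (mask & pasted).sum()
    return (intersect / (area + 1e-5)) > overlap_thr  # True -> skip candidate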
| [
"[email protected]"
] | |
170eb1cf38678e8baf10258b548535244e7f2996 | 12df1e58fe493c4a929e6d54a938f9b357964701 | /Day-5 Closest Value in BST.py | 68d748a07e7b9618aa7fce5bd8d1b7190170c74e | [] | no_license | Anshul-Dagar/100-Day-Coding-Challenge | 132dadc50b572428c7e33ceda329770d8766965a | 33f10cc6357d4ca3fa8a16cc954f6559f39e73bb | refs/heads/main | 2023-02-12T04:04:12.389433 | 2021-01-09T13:56:36 | 2021-01-09T13:56:36 | 326,140,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 19:30:49 2021
@author: ironman
"""
class BST:
def __init__(self,value):
self.value=value
self.left=None
self.right=None
def insert(self,value):
currentnode=self
while True:
if value<currentnode.value:
if currentnode.left is None:
currentnode.left=BST(value)
break
else:
currentnode=currentnode.left
else:
if currentnode.right is None:
currentnode.right=BST(value)
break
else:
currentnode=currentnode.right
return self
def contain(self,value):
currentnode=self
while currentnode is not None:
if value<currentnode.value:
currentnode=currentnode.left
elif value>currentnode.value:
currentnode=currentnode.right
else:
return True
return False
def findClosestValueInBst(target,tree):
return findClosestValueInBstHelper(target,tree,float("inf"))
def findClosestValueInBstHelper(target,tree,closest):
currentnode=tree
while currentnode is not None:
if abs(target-closest)>abs(target-currentnode.value):
closest=currentnode.value
if target>currentnode.value:
currentnode=currentnode.right
elif target<currentnode.value:
currentnode=currentnode.left
else:
break
return closest
tree=BST(10)
tree.insert(5)
tree.insert(15)
tree.insert(2)
tree.insert(5)
tree.insert(1)
ans=findClosestValueInBst(9,tree)
print(ans) | [
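# A second illustrative query: the closest stored value to 4 is 5, since
# |4-5| = 1 beats |4-2| = 2 along the search path 10 -> 5 -> 2.
print(findClosestValueInBst(4, tree))  # 5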
"[email protected]"
] | |
d2e145a737723d90d40cb49ba1513f4ce09da229 | d0fcc2198f1caf5633c4fc0d004ba68714396f1b | /bc4py/utils.py | d1c4a85cb4d9f0df6c85fb081bee3a4001b51119 | [
"MIT"
] | permissive | webclinic017/bc4py | 4bfce04b666c2aaadda4b7ecc2a8270839231850 | 620b7d855ec957b3e2b4021cf8069d9dd128587a | refs/heads/master | 2022-12-09T22:23:49.842255 | 2019-06-21T14:24:17 | 2019-06-21T14:24:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,100 | py | from bc4py.config import C, V
from bc4py.gittool import get_current_branch
from bc4py.chain.utils import GompertzCurve
from Cryptodome.Cipher import AES
from Cryptodome import Random
from Cryptodome.Hash import SHA256
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from logging import getLogger, DEBUG, INFO, WARNING, ERROR
import multiprocessing
import os
import psutil
import sys
WALLET_VERSION = 0
log = getLogger('bc4py')
NAME2LEVEL = {
'DEBUG': DEBUG,
'INFO': INFO,
'WARNING': WARNING,
'ERROR': ERROR,
}
def set_database_path(sub_dir=None):
V.DB_HOME_DIR = os.path.join(os.path.expanduser("~"), 'blockchain-py')
if not os.path.exists(V.DB_HOME_DIR):
os.makedirs(V.DB_HOME_DIR)
if sub_dir:
V.DB_HOME_DIR = os.path.join(V.DB_HOME_DIR, sub_dir)
if not os.path.exists(V.DB_HOME_DIR):
os.makedirs(V.DB_HOME_DIR)
V.DB_ACCOUNT_PATH = os.path.join(V.DB_HOME_DIR, 'wallet.ver{}.dat'.format(WALLET_VERSION))
def set_blockchain_params(genesis_block, params):
assert 'spawn' in multiprocessing.get_all_start_methods(), 'Not found spawn method'
V.GENESIS_BLOCK = genesis_block
V.GENESIS_PARAMS = params
V.BECH32_HRP = params.get('hrp')
V.BLOCK_GENESIS_TIME = params.get('genesis_time')
V.BLOCK_MINING_SUPPLY = params.get('mining_supply')
V.BLOCK_TIME_SPAN = params.get('block_span')
V.BLOCK_REWARD = params.get('block_reward')
V.COIN_DIGIT = params.get('digit_number')
V.COIN_MINIMUM_PRICE = params.get('minimum_price')
V.BLOCK_CONSENSUSES = params.get('consensus')
GompertzCurve.k = V.BLOCK_MINING_SUPPLY
V.BRANCH_NAME = get_current_branch()
def check_already_started():
assert V.DB_HOME_DIR is not None
# check already started
pid_path = os.path.join(V.DB_HOME_DIR, 'pid.lock')
if os.path.exists(pid_path):
with open(pid_path, mode='r') as fp:
pid = int(fp.read())
if psutil.pid_exists(pid):
raise RuntimeError('Already running blockchain-py pid={}'.format(pid))
new_pid = os.getpid()
with open(pid_path, mode='w') as fp:
fp.write(str(new_pid))
log.info("create new process lock file pid={}".format(new_pid))
def console_args_parser():
"""get help by `python publicnode.py -h`"""
p = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
p.add_argument('--p2p',
help='p2p server bind port',
default=2000,
type=int)
p.add_argument('--rest',
help='REST API bind port',
default=3000,
type=int)
p.add_argument('--host',
help='REST API bind host',
default='127.0.0.1',
type=str)
p.add_argument('--user', '-u',
help='API user name',
default='user',
type=str)
p.add_argument('--password', '-p',
help='API password',
default='password',
type=str)
p.add_argument('--sub-dir',
help='setup blockchain folder path',
default=None)
p.add_argument('--log-level',
help='logging level',
choices=list(NAME2LEVEL),
default='INFO')
p.add_argument('--log-path',
help='recode log file path',
default=None,
type=str)
p.add_argument('--remove-log',
help='remove old log file when start program',
action='store_true')
p.add_argument('--daemon',
help='make process daemon',
action='store_true')
p.add_argument('--staking',
help='enable coin base staking',
action='store_true')
p.add_argument('--solo-mining',
help='solo mining for debug or testnet',
action='store_true')
return p.parse_args()
def check_process_status(f_daemon):
if sys.platform == 'win32':
# windows
if f_daemon:
if sys.executable.endswith("pythonw.exe"):
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
else:
print("ERROR: Please execute by `pythonw.exe` not `python.exe` if you enable daemon flag")
sys.exit()
else:
if sys.executable.endswith("pythonw.exe"):
print("ERROR: Please execute by `python.exe`")
sys.exit()
else:
# stdin close to prevent lock on console
sys.stdin.close()
else:
# other
if f_daemon:
pid = os.fork()
if pid == 0:
# child process (daemon)
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
else:
# main process
print("INFO: Make daemon process pid={}".format(pid))
sys.exit()
else:
# stdin close to prevent lock on console
sys.stdin.close()
class AESCipher:
@staticmethod
def create_key():
return os.urandom(AES.block_size)
@staticmethod
def encrypt(key, raw):
assert isinstance(key, bytes)
assert isinstance(raw, bytes), "input data is bytes"
key = SHA256.new(key).digest()[:AES.block_size]
raw = AESCipher._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_CBC, iv)
return iv + cipher.encrypt(raw)
@staticmethod
def decrypt(key, enc):
assert isinstance(key, bytes)
assert isinstance(enc, bytes), 'Encrypt data is bytes'
key = SHA256.new(key).digest()[:AES.block_size]
iv = enc[:AES.block_size]
cipher = AES.new(key, AES.MODE_CBC, iv)
raw = AESCipher._unpad(cipher.decrypt(enc[AES.block_size:]))
if len(raw) == 0:
raise ValueError("AES decryption error, not correct key")
else:
return raw
@staticmethod
def _pad(s):
        # PKCS#7-style padding: append `pad` bytes, each with value `pad`
        pad = AES.block_size - len(s) % AES.block_size
        return s + pad * pad.to_bytes(1, 'little')
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s) - 1:])]
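
# Illustrative round trip for AESCipher; the payload bytes are hypothetical
# and the helper is not called anywhere in bc4py.
def _demo_aes_roundtrip():
    key = AESCipher.create_key()
    msg = b'bc4py demo payload'
    assert AESCipher.decrypt(key, AESCipher.encrypt(key, msg)) == msg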
class ProgressBar:
"""
terminal progressbar
original: https://github.com/bozoh/console_progressbar
author: Carlos Alexandre S. da Fonseca
"""
def __init__(self, prefix, default_suffix='', total=100, decimals=0, length=50, fill='X', zfill='-'):
self.prefix = prefix
self.default_suffix = default_suffix
self.__decimals = decimals
self.__length = length
self.__fill = fill
self.__zfill = zfill
self.__total = total
def _generate_bar(self, iteration, suffix=None):
percent = ("{0:." + str(self.__decimals) + "f}")
percent = percent.format(100 * (iteration / float(self.__total)))
filled_length = int(self.__length * iteration // self.__total)
bar = self.__fill * filled_length + self.__zfill * (self.__length - filled_length)
return '{0} |{1}| {2}% {3}'.format(self.prefix, bar, percent, suffix or self.default_suffix)
def print_progress_bar(self, iteration, suffix=None):
print('\r%s' % (self._generate_bar(iteration, suffix)), end='')
sys.stdout.flush()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.print_progress_bar(self.__total, 'Complete')
print()
else:
print()
sys.stdout.flush()
log.error('Error on progress, {}'.format(exc_val))
return True
__all__ = [
"set_database_path",
"set_blockchain_params",
"check_already_started",
"console_args_parser",
"check_process_status",
"AESCipher",
"ProgressBar",
]
| [
"[email protected]"
] | |
9c435a42cdc60fb08b9624fc926efccf8f66c4b1 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-alloydb/google/cloud/alloydb_v1/services/alloy_db_admin/transports/grpc_asyncio.py | 1472c7e2d2d253937db5f0bc67d4dde86f67efb8 | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 50,442 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import gapic_v1, grpc_helpers_async, operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.cloud.location import locations_pb2 # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.alloydb_v1.types import resources, service
from .base import DEFAULT_CLIENT_INFO, AlloyDBAdminTransport
from .grpc import AlloyDBAdminGrpcTransport
class AlloyDBAdminGrpcAsyncIOTransport(AlloyDBAdminTransport):
"""gRPC AsyncIO backend transport for AlloyDBAdmin.
Service describing handlers for resources
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "alloydb.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "alloydb.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: Optional[aio.Channel] = None,
api_mtls_endpoint: Optional[str] = None,
client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
api_audience=api_audience,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_clusters(
self,
) -> Callable[
[service.ListClustersRequest], Awaitable[service.ListClustersResponse]
]:
r"""Return a callable for the list clusters method over gRPC.
Lists Clusters in a given project and location.
Returns:
Callable[[~.ListClustersRequest],
Awaitable[~.ListClustersResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_clusters" not in self._stubs:
self._stubs["list_clusters"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/ListClusters",
request_serializer=service.ListClustersRequest.serialize,
response_deserializer=service.ListClustersResponse.deserialize,
)
return self._stubs["list_clusters"]
@property
def get_cluster(
self,
) -> Callable[[service.GetClusterRequest], Awaitable[resources.Cluster]]:
r"""Return a callable for the get cluster method over gRPC.
Gets details of a single Cluster.
Returns:
Callable[[~.GetClusterRequest],
Awaitable[~.Cluster]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_cluster" not in self._stubs:
self._stubs["get_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/GetCluster",
request_serializer=service.GetClusterRequest.serialize,
response_deserializer=resources.Cluster.deserialize,
)
return self._stubs["get_cluster"]
@property
def create_cluster(
self,
) -> Callable[[service.CreateClusterRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the create cluster method over gRPC.
Creates a new Cluster in a given project and
location.
Returns:
Callable[[~.CreateClusterRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_cluster" not in self._stubs:
self._stubs["create_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/CreateCluster",
request_serializer=service.CreateClusterRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_cluster"]
@property
def update_cluster(
self,
) -> Callable[[service.UpdateClusterRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the update cluster method over gRPC.
Updates the parameters of a single Cluster.
Returns:
Callable[[~.UpdateClusterRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_cluster" not in self._stubs:
self._stubs["update_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/UpdateCluster",
request_serializer=service.UpdateClusterRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_cluster"]
@property
def delete_cluster(
self,
) -> Callable[[service.DeleteClusterRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete cluster method over gRPC.
Deletes a single Cluster.
Returns:
Callable[[~.DeleteClusterRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_cluster" not in self._stubs:
self._stubs["delete_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/DeleteCluster",
request_serializer=service.DeleteClusterRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_cluster"]
@property
def promote_cluster(
self,
) -> Callable[[service.PromoteClusterRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the promote cluster method over gRPC.
Promotes a SECONDARY cluster. This turns down
replication from the PRIMARY cluster and promotes a
secondary cluster into its own standalone cluster.
Imperative only.
Returns:
Callable[[~.PromoteClusterRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "promote_cluster" not in self._stubs:
self._stubs["promote_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/PromoteCluster",
request_serializer=service.PromoteClusterRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["promote_cluster"]
@property
def restore_cluster(
self,
) -> Callable[[service.RestoreClusterRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the restore cluster method over gRPC.
Creates a new Cluster in a given project and
location, with a volume restored from the provided
source, either a backup ID or a point-in-time and a
source cluster.
Returns:
Callable[[~.RestoreClusterRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_cluster" not in self._stubs:
self._stubs["restore_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/RestoreCluster",
request_serializer=service.RestoreClusterRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["restore_cluster"]
@property
def create_secondary_cluster(
self,
) -> Callable[
[service.CreateSecondaryClusterRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create secondary cluster method over gRPC.
Creates a cluster of type SECONDARY in the given
location using the primary cluster as the source.
Returns:
Callable[[~.CreateSecondaryClusterRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_secondary_cluster" not in self._stubs:
self._stubs["create_secondary_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/CreateSecondaryCluster",
request_serializer=service.CreateSecondaryClusterRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_secondary_cluster"]
@property
def list_instances(
self,
) -> Callable[
[service.ListInstancesRequest], Awaitable[service.ListInstancesResponse]
]:
r"""Return a callable for the list instances method over gRPC.
Lists Instances in a given project and location.
Returns:
Callable[[~.ListInstancesRequest],
Awaitable[~.ListInstancesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_instances" not in self._stubs:
self._stubs["list_instances"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/ListInstances",
request_serializer=service.ListInstancesRequest.serialize,
response_deserializer=service.ListInstancesResponse.deserialize,
)
return self._stubs["list_instances"]
@property
def get_instance(
self,
) -> Callable[[service.GetInstanceRequest], Awaitable[resources.Instance]]:
r"""Return a callable for the get instance method over gRPC.
Gets details of a single Instance.
Returns:
Callable[[~.GetInstanceRequest],
Awaitable[~.Instance]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_instance" not in self._stubs:
self._stubs["get_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/GetInstance",
request_serializer=service.GetInstanceRequest.serialize,
response_deserializer=resources.Instance.deserialize,
)
return self._stubs["get_instance"]
@property
def create_instance(
self,
) -> Callable[[service.CreateInstanceRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the create instance method over gRPC.
Creates a new Instance in a given project and
location.
Returns:
Callable[[~.CreateInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_instance" not in self._stubs:
self._stubs["create_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/CreateInstance",
request_serializer=service.CreateInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_instance"]
@property
def create_secondary_instance(
self,
) -> Callable[
[service.CreateSecondaryInstanceRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create secondary instance method over gRPC.
Creates a new SECONDARY Instance in a given project
and location.
Returns:
Callable[[~.CreateSecondaryInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_secondary_instance" not in self._stubs:
self._stubs["create_secondary_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/CreateSecondaryInstance",
request_serializer=service.CreateSecondaryInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_secondary_instance"]
@property
def batch_create_instances(
self,
) -> Callable[
[service.BatchCreateInstancesRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the batch create instances method over gRPC.
Creates new instances under the given project,
location and cluster. There can be only one primary
instance in a cluster. If the primary instance exists in
the cluster as well as this request, then API will throw
an error.
The primary instance should exist before any read pool
instance is created. If the primary instance is a part
of the request payload, then the API will take care of
creating instances in the correct order. This method is
here to support Google-internal use cases, and is not
meant for external customers to consume. Please do not
start relying on it; its behavior is subject to change
without notice.
Returns:
Callable[[~.BatchCreateInstancesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_create_instances" not in self._stubs:
self._stubs["batch_create_instances"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/BatchCreateInstances",
request_serializer=service.BatchCreateInstancesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_create_instances"]
@property
def update_instance(
self,
) -> Callable[[service.UpdateInstanceRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the update instance method over gRPC.
Updates the parameters of a single Instance.
Returns:
Callable[[~.UpdateInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_instance" not in self._stubs:
self._stubs["update_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/UpdateInstance",
request_serializer=service.UpdateInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_instance"]
@property
def delete_instance(
self,
) -> Callable[[service.DeleteInstanceRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete instance method over gRPC.
Deletes a single Instance.
Returns:
Callable[[~.DeleteInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_instance" not in self._stubs:
self._stubs["delete_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/DeleteInstance",
request_serializer=service.DeleteInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_instance"]
@property
def failover_instance(
self,
) -> Callable[
[service.FailoverInstanceRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the failover instance method over gRPC.
Forces a Failover for a highly available instance.
Failover promotes the HA standby instance as the new
primary. Imperative only.
Returns:
Callable[[~.FailoverInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "failover_instance" not in self._stubs:
self._stubs["failover_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/FailoverInstance",
request_serializer=service.FailoverInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["failover_instance"]
@property
def inject_fault(
self,
) -> Callable[[service.InjectFaultRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the inject fault method over gRPC.
Injects fault in an instance.
Imperative only.
Returns:
Callable[[~.InjectFaultRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "inject_fault" not in self._stubs:
self._stubs["inject_fault"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/InjectFault",
request_serializer=service.InjectFaultRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["inject_fault"]
@property
def restart_instance(
self,
) -> Callable[
[service.RestartInstanceRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the restart instance method over gRPC.
Restart an Instance in a cluster.
Imperative only.
Returns:
Callable[[~.RestartInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restart_instance" not in self._stubs:
self._stubs["restart_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/RestartInstance",
request_serializer=service.RestartInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["restart_instance"]
@property
def list_backups(
self,
) -> Callable[[service.ListBackupsRequest], Awaitable[service.ListBackupsResponse]]:
r"""Return a callable for the list backups method over gRPC.
Lists Backups in a given project and location.
Returns:
Callable[[~.ListBackupsRequest],
Awaitable[~.ListBackupsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backups" not in self._stubs:
self._stubs["list_backups"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/ListBackups",
request_serializer=service.ListBackupsRequest.serialize,
response_deserializer=service.ListBackupsResponse.deserialize,
)
return self._stubs["list_backups"]
@property
def get_backup(
self,
) -> Callable[[service.GetBackupRequest], Awaitable[resources.Backup]]:
r"""Return a callable for the get backup method over gRPC.
Gets details of a single Backup.
Returns:
Callable[[~.GetBackupRequest],
Awaitable[~.Backup]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_backup" not in self._stubs:
self._stubs["get_backup"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/GetBackup",
request_serializer=service.GetBackupRequest.serialize,
response_deserializer=resources.Backup.deserialize,
)
return self._stubs["get_backup"]
@property
def create_backup(
self,
) -> Callable[[service.CreateBackupRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the create backup method over gRPC.
Creates a new Backup in a given project and location.
Returns:
Callable[[~.CreateBackupRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_backup" not in self._stubs:
self._stubs["create_backup"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/CreateBackup",
request_serializer=service.CreateBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_backup"]
@property
def update_backup(
self,
) -> Callable[[service.UpdateBackupRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the update backup method over gRPC.
Updates the parameters of a single Backup.
Returns:
Callable[[~.UpdateBackupRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_backup" not in self._stubs:
self._stubs["update_backup"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/UpdateBackup",
request_serializer=service.UpdateBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_backup"]
@property
def delete_backup(
self,
) -> Callable[[service.DeleteBackupRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete backup method over gRPC.
Deletes a single Backup.
Returns:
Callable[[~.DeleteBackupRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_backup" not in self._stubs:
self._stubs["delete_backup"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/DeleteBackup",
request_serializer=service.DeleteBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_backup"]
@property
def list_supported_database_flags(
self,
) -> Callable[
[service.ListSupportedDatabaseFlagsRequest],
Awaitable[service.ListSupportedDatabaseFlagsResponse],
]:
r"""Return a callable for the list supported database flags method over gRPC.
Lists SupportedDatabaseFlags for a given project and
location.
Returns:
Callable[[~.ListSupportedDatabaseFlagsRequest],
Awaitable[~.ListSupportedDatabaseFlagsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_supported_database_flags" not in self._stubs:
self._stubs[
"list_supported_database_flags"
] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/ListSupportedDatabaseFlags",
request_serializer=service.ListSupportedDatabaseFlagsRequest.serialize,
response_deserializer=service.ListSupportedDatabaseFlagsResponse.deserialize,
)
return self._stubs["list_supported_database_flags"]
@property
def list_users(
self,
) -> Callable[[service.ListUsersRequest], Awaitable[service.ListUsersResponse]]:
r"""Return a callable for the list users method over gRPC.
Lists Users in a given project and location.
Returns:
Callable[[~.ListUsersRequest],
Awaitable[~.ListUsersResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_users" not in self._stubs:
self._stubs["list_users"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/ListUsers",
request_serializer=service.ListUsersRequest.serialize,
response_deserializer=service.ListUsersResponse.deserialize,
)
return self._stubs["list_users"]
@property
def get_user(self) -> Callable[[service.GetUserRequest], Awaitable[resources.User]]:
r"""Return a callable for the get user method over gRPC.
Gets details of a single User.
Returns:
Callable[[~.GetUserRequest],
Awaitable[~.User]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_user" not in self._stubs:
self._stubs["get_user"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/GetUser",
request_serializer=service.GetUserRequest.serialize,
response_deserializer=resources.User.deserialize,
)
return self._stubs["get_user"]
@property
def create_user(
self,
) -> Callable[[service.CreateUserRequest], Awaitable[resources.User]]:
r"""Return a callable for the create user method over gRPC.
Creates a new User in a given project, location, and
cluster.
Returns:
Callable[[~.CreateUserRequest],
Awaitable[~.User]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_user" not in self._stubs:
self._stubs["create_user"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/CreateUser",
request_serializer=service.CreateUserRequest.serialize,
response_deserializer=resources.User.deserialize,
)
return self._stubs["create_user"]
@property
def update_user(
self,
) -> Callable[[service.UpdateUserRequest], Awaitable[resources.User]]:
r"""Return a callable for the update user method over gRPC.
Updates the parameters of a single User.
Returns:
Callable[[~.UpdateUserRequest],
Awaitable[~.User]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_user" not in self._stubs:
self._stubs["update_user"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/UpdateUser",
request_serializer=service.UpdateUserRequest.serialize,
response_deserializer=resources.User.deserialize,
)
return self._stubs["update_user"]
@property
def delete_user(
self,
) -> Callable[[service.DeleteUserRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete user method over gRPC.
Deletes a single User.
Returns:
Callable[[~.DeleteUserRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_user" not in self._stubs:
self._stubs["delete_user"] = self.grpc_channel.unary_unary(
"/google.cloud.alloydb.v1.AlloyDBAdmin/DeleteUser",
request_serializer=service.DeleteUserRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_user"]
def close(self):
return self.grpc_channel.close()
@property
def delete_operation(
self,
) -> Callable[[operations_pb2.DeleteOperationRequest], None]:
r"""Return a callable for the delete_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_operation" not in self._stubs:
self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/DeleteOperation",
request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
response_deserializer=None,
)
return self._stubs["delete_operation"]
@property
def cancel_operation(
self,
) -> Callable[[operations_pb2.CancelOperationRequest], None]:
r"""Return a callable for the cancel_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_operation" not in self._stubs:
self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/CancelOperation",
request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
response_deserializer=None,
)
return self._stubs["cancel_operation"]
@property
def get_operation(
self,
) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
r"""Return a callable for the get_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_operation" not in self._stubs:
self._stubs["get_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/GetOperation",
request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["get_operation"]
@property
def list_operations(
self,
) -> Callable[
[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
]:
r"""Return a callable for the list_operations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_operations" not in self._stubs:
self._stubs["list_operations"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/ListOperations",
request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
response_deserializer=operations_pb2.ListOperationsResponse.FromString,
)
return self._stubs["list_operations"]
@property
def list_locations(
self,
) -> Callable[
[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse
]:
r"""Return a callable for the list locations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_locations" not in self._stubs:
self._stubs["list_locations"] = self.grpc_channel.unary_unary(
"/google.cloud.location.Locations/ListLocations",
request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
response_deserializer=locations_pb2.ListLocationsResponse.FromString,
)
return self._stubs["list_locations"]
@property
def get_location(
self,
) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
r"""Return a callable for the list locations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_location" not in self._stubs:
self._stubs["get_location"] = self.grpc_channel.unary_unary(
"/google.cloud.location.Locations/GetLocation",
request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
response_deserializer=locations_pb2.Location.FromString,
)
return self._stubs["get_location"]
__all__ = ("AlloyDBAdminGrpcAsyncIOTransport",)
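# Illustrative construction sketch (hedged: the import path is inferred from
# this package's layout, and application default credentials must be set up):
#     from google.cloud.alloydb_v1.services.alloy_db_admin.transports import (
#         AlloyDBAdminGrpcAsyncIOTransport,
#     )
#     transport = AlloyDBAdminGrpcAsyncIOTransport()
#     # more commonly the transport is handed to the async client, e.g.:
#     # client = AlloyDBAdminAsyncClient(transport=transport)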
| [
"[email protected]"
] | |
98952fb10cb1ca024f54f5d5c0ed1a7a188ee2fe | e70d8a72a793943418943cbb9ac9466338a62e68 | /env/bin/jupyter | 9d2f3a6e4c45876082d250a3642c0b2d736606e3 | [] | no_license | smwitkowski/HHH-Roles | 8209d690eaa5879224dd36d3aa784f6ee9ead866 | b7d6fcb3896ed72b897feae1ee94ccf258f0b897 | refs/heads/master | 2022-10-28T05:21:48.790671 | 2019-06-01T23:58:42 | 2019-06-01T23:58:42 | 189,666,309 | 0 | 1 | null | 2022-10-22T08:38:30 | 2019-05-31T22:39:22 | Jupyter Notebook | UTF-8 | Python | false | false | 281 | #!/Users/user/Documents/Projects/python-virtual-environments/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.command import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
56141ab1f36a5991fdb4b13da10601235ea38066 | 24298abd2af582ed31fb38ec4c68d36dd7bf8598 | /estimate.py | a7dd4761aa713e8099d3c5270771902e5535e903 | [] | no_license | csvetnicka/csvetnicka.github.io | a130c3f8098c0621f547abe84e8cce0a305874b7 | 32703c0955779928b8d728c7973d30d5340081c3 | refs/heads/master | 2020-06-13T20:22:37.320479 | 2016-12-04T20:39:16 | 2016-12-04T20:39:16 | 75,556,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | print(300 * 40 * 30 * 4 * 'a')
| [
"[email protected]"
] | |
d7df952db203ae0eae7c815bea836cbbaa26d296 | 08f2419edcbd1464ba4496e8cc40589e19401b90 | /kfk/main.py | ec8315f2900073a3ca6ac54e32d467814b17f13e | [
"Apache-2.0"
] | permissive | alainlompo/strimzi-kafka-cli | 6bcbff4925b6673299261e0d2a526c57823bca32 | f4ed19b849fdd9b59274eab96fc29fab3aa38e0f | refs/heads/master | 2023-09-02T22:05:47.765551 | 2021-11-15T07:42:28 | 2021-11-15T07:42:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | from kfk.commands.main import kfk
from kfk.commands.clusters import clusters
from kfk.commands.topics import topics
from kfk.commands.users import users
from kfk.commands.console import console_consumer, console_producer
from kfk.commands.configs import configs
from kfk.commands.acls import acls
from kfk.commands.operator import operator
from kfk.commands.env import env
# these imports are needed for their click-registration side effects; note the
# connect subgroup rebinds the module-level name `clusters` imported above,
# which is harmless here because only the import side effects matter
from kfk.commands.connect.clusters import clusters
from kfk.commands.connect.connectors import connectors
from kfk.setup import setup
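# Entry-point sketch: installed as a console script, the `kfk` click group
# imported above drives the CLI; a hypothetical invocation (flag names as in
# the upstream project's README) could look like:
#     kfk topics --list -c my-cluster -n kafka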
setup() | [
"[email protected]"
] | |
f94ebff30078394b605fc47632cbcc1df7200fb5 | 971fd54845868a4db46ae7e5802c0f584acf0242 | /mining/test/learning/Spotify.py | 80e393af2bfa32385ffe23ece95b941c81a02a6d | [] | no_license | monkeyboiii/eastmoney | 017f2e2a9c42865930ca44748660b0dee2f251e4 | dde99651be2135022fa41cf0871c923bbaf5b8ee | refs/heads/master | 2022-04-22T19:45:35.719068 | 2020-04-13T01:35:26 | 2020-04-13T01:35:26 | 255,081,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | import requests
url = "https://api.spotify.com/v1/search?q=Trap&type=track&market=US"
payload = {}
headers = {
'Authorization': 'Bearer '
'BQC6cJmlEkhTe5U53eGHOdYwQOLTNq8_PgHATFWVD9O9pz-eH9peX17hQ7R_6n0XK8RKy2UzCeNv8ikM9cZtE0pbJnXm_HOP-kdZk1b1FYQGmOhArgfN5VEUt6taQnNrEJtdvp85kRWdJEa1M7QJ3pY0i8VPCH727yE'
}
response = requests.request("GET", url, headers=headers, data=payload)
# a with-block guarantees the handle is flushed and closed
with open('SpotifyTemp.txt', mode='w') as file:
    file.write(response.text)
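# Hedged follow-up sketch: the search response is JSON, so track names could be
# pulled out directly (field layout per Spotify's documented search schema;
# adjust if the actual response shape differs):
#     data = response.json()
#     for item in data.get('tracks', {}).get('items', []):
#         print(item['name'], '-', item['artists'][0]['name'])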
| [
"[email protected]"
] | |
3dbad78ba483ba4a076743f3dc60241739f36cc7 | dbf556a2085c45498f23d15a719b1a96b840ef8d | /Simple/LinkedListCycle.py | d8a483b4213f98ce897b4b124fba93cad96f2a4f | [] | no_license | FrancisFan98/Leetcode- | 43cf46d88d8927848bf47a0a11aabdc1f9d8f628 | a9b7e1e890417ae7352cc98bccddee27ce657b0f | refs/heads/master | 2021-07-12T17:55:22.119661 | 2020-05-27T02:17:29 | 2020-05-27T02:17:29 | 142,628,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
        # Destructive marking trick: every visited node is re-pointed at
        # itself, so walking back into marked territory proves a cycle.
        # Note that this mutates (destroys) the input list.
        cur = head
        if head is None or head.next is None:
            return False
        while cur is not None:
            nex = cur.next
            cur.next = cur  # mark cur as visited with a self-loop
            if nex is None:
                return False  # fell off the end: no cycle
            if nex.next == cur:  # re-entered a marked node: cycle
                break
            cur = nex
        return True
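    # Added illustrative alternative (hedged sketch, not part of the original
    # submission): Floyd's tortoise-and-hare detects the cycle in O(1) space
    # without mutating the list.
    def hasCycleFloyd(self, head):
        slow = fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if slow is fast:
                return True
        return False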
| [
"[email protected]"
] | |
6ddb8e3c2e4fe29b69b14ca4ef76e926ce5243b2 | fd9dd0b4aea0f55e2b35529462bf5fa7bd4f9d33 | /test.py | 85cc06087dbb4615d77c4fd7382cdab9d535dd14 | [] | no_license | mraines4/Python_wk1 | 833a48f6576cb2130c02516c69a537a1a4e0f158 | 88bb07b706a67254d795e616412b2baf70217e6c | refs/heads/master | 2020-04-23T16:05:43.731860 | 2019-02-19T20:28:39 | 2019-02-19T20:28:39 | 171,286,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | user = input('type number ')
if user.isdigit():
print('yes')
else:
print('no') | [
"[email protected]"
] | |
4bb323f332c4b4171eb8fc81e644fc306a720eff | e9bfbb32f8e682afae8505fb45e32571ad866080 | /Drills/Drill-05/move_character_with_mouse.py | 49a36e0695819c4f5386fd35730a7181f5c93e2f | [] | no_license | msbk427/2014182020-2DGP | 4f0834bcb2320b56bff1e1eec8d2f24baa40a555 | 90bcaf0027d73d4c819d0d2399ac664f70081541 | refs/heads/master | 2020-08-05T23:00:51.053010 | 2019-12-02T03:18:38 | 2019-12-02T03:18:38 | 212,746,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | from pico2d import *
KPU_WIDTH, KPU_HEIGHT = 1280, 1024
def handle_events():
global running
global x, y
global cx, cy
events = get_events()
for event in events:
if event.type == SDL_QUIT:
running = False
elif event.type == SDL_MOUSEMOTION:
x, y = event.x, KPU_HEIGHT - 1 - event.y
elif event.type == SDL_MOUSEBUTTONDOWN:
cx, cy = event.x, KPU_HEIGHT - 1 - event.y
elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:
running = False
open_canvas(KPU_WIDTH, KPU_HEIGHT)
kpu_ground = load_image('KPU_GROUND.png')
character = load_image('animation_sheet.png')
cursor = load_image('hand_arrow.png')
running = True
x, y = KPU_WIDTH // 2, KPU_HEIGHT // 2
cx, cy = KPU_WIDTH // 2, KPU_HEIGHT // 2
ci_x, ci_y = KPU_WIDTH // 2, KPU_HEIGHT // 2
frame = 0
i, t = 0, 0
direction = 1
hide_cursor()
while running:
clear_canvas()
kpu_ground.draw(KPU_WIDTH // 2, KPU_HEIGHT // 2)
cursor.draw(x, y)
if ci_x >= cx:
direction = 0
else:
direction = 1
    i = (i + 1) % 100
    t = i / 100
    # linear interpolation: each frame eases the character a fraction t of the
    # way toward the last clicked point (cx, cy)
    ci_x = (1 - t) * ci_x + t * cx
    ci_y = (1 - t) * ci_y + t * cy
character.clip_draw(frame * 100, 100 * direction, 100, 100, ci_x, ci_y)
update_canvas()
handle_events()
frame = (frame + 1) % 8
delay(0.05)
close_canvas()
| [
"[email protected]"
] | |
1ba209054c0782d87286628966586b58f76b2eec | 15855ce729e78fa0628d0e5a774b7fcaff7acc85 | /pythonProject/Dictionary (Key value pair collections)/DictionaryDemo3.py | ec0c92f9d429a3b35decc30530fd050e28c1a186 | [] | no_license | nikhil-shukla/GitDemo | d8c63aec6978aed251c0a4df3c5b4aacef702735 | f060716815f9ba1025ce8fc525dd10e9ddc0b2dc | refs/heads/master | 2023-07-08T22:48:26.439978 | 2021-08-16T13:25:18 | 2021-08-16T13:25:18 | 396,787,624 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | #Syntax 2 - using dictonary constructor
emp=dict(QA="nikhil",Dev="ak",qa="python",Qa="cpyhton")
print(emp)
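#Syntax 2b - building a dict from two parallel sequences with zip (added example)
zipped = dict(zip([1, 2, 3], ["Python", "Java", "JS"]))
print(zipped)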
#Syntax 3 - using dictionary constructor and passing values as tuples
newemp=dict([(1,"Python"),(2,"Java"),(3,"JS")])
print(newemp) | [
"[email protected]"
] | |
9f6f269323eea9a6b3519445a652ffba91b0c580 | ea79bd5a2d2a15f1d8ca12b4dede02934305bc28 | /approcPatternCount.py | 884e0d55218f72c8bbfa2813ecb1d3d45a704fc0 | [] | no_license | jtt48/Coursera | 54b1b29561eb3a362c261d55fc4d12cde3de9590 | 69366264652d7ee0688af3538c0848910b93c8d0 | refs/heads/master | 2020-03-25T05:30:35.384253 | 2018-08-03T17:23:01 | 2018-08-03T17:23:01 | 143,450,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,350 | py | import sys
# slide a window of size len(pattern) across text.
# count the windows whose Hamming distance to pattern is <= errorNmb.
def approxPatternCount(pattern, text, errorNmb):
# print("starting approxPatternMatch")
# print("Pattern: ", pattern, " Matching to: ", text)
count = 0
rstring = 0
for i in range(len(text)):
if i == len(text) - ((len(pattern) - 1)):
break
for j in range(len(pattern)):
# if j == len(pattern) - 1:
# break
# print(pattern[j], text[i+j])
if pattern[j] != text[i+j]:
count = count + 1
# print(count, pattern, text[i:i+len(pattern)])
if count <= int(errorNmb):
#print(count <= errorNmb)
#print("I: ",i, "Count: ", count, "Error Nmb: ", errorNmb)
rstring = rstring + 1
count = 0
# print(" approxPatternMatch: ",rstring)
return rstring
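# Hand-checked examples (comments only, nothing executes at import):
#     approxPatternCount("AT", "ATAT", 0) -> 2   windows AT, TA, AT have 0, 2, 0 mismatches
#     approxPatternCount("AG", "ATAT", 1) -> 2   windows have 1, 2, 1 mismatches; two are <= 1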
if __name__ == "__main__":
with open(sys.argv[1], "r+") as input:
with open("output.txt", "w+") as output:
param = input.read().splitlines()
pattern = param[0]
text = param[1]
d = param[2]
output.write(str(approxPatternCount(pattern,text,d)))
| [
"[email protected]"
] | |
8d8566234ea438d492afb7febfec544f5211e99a | e31c5eecfed4502e22c810e798003a40082c1279 | /collab_app/models/profile.py | b0f8014e1dce8dfdda01aa008560123c152c54c1 | [] | no_license | CollabSauce/collab-backend | 96367dedce7d1b64d8921b74e5e22706384d4624 | b8726c8ff43d8995415b7ac81b38bc6973943c50 | refs/heads/master | 2023-02-04T07:07:36.836601 | 2020-12-24T19:48:45 | 2020-12-24T19:48:45 | 290,001,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from django.conf import settings
from django.db import models
from collab_app.mixins.models import BaseModel
class Profile(BaseModel):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
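# Hedged companion sketch (not necessarily wired up in this app): one-to-one
# profiles like this are often auto-created with a post_save signal, e.g.:
#
#     from django.db.models.signals import post_save
#     from django.dispatch import receiver
#
#     @receiver(post_save, sender=settings.AUTH_USER_MODEL)
#     def create_profile(sender, instance, created, **kwargs):
#         if created:
#             Profile.objects.create(user=instance)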
| [
"[email protected]"
] | |
8c9fc38f1538d4919a49ff72447c1c932fc03db5 | 0c0ec63b2dc805be3affc7a5e501c2a23c8a16ac | /ml_process/stock/test.py | d58e5eb639df4fb2bfaafb6b8e3619b495e012c2 | [] | no_license | choochootrain/newty2 | 4cb4cc1d9942729a3ca389cc6a35d1a769dafdb8 | 022b1eaedb87f290e23e7faf516d4bb2f42424c9 | refs/heads/master | 2021-01-22T06:44:16.840231 | 2013-05-17T20:48:21 | 2013-05-17T20:48:21 | 9,737,048 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | f = open('AAPL.npy', 'r')
import numpy
f.close()  # the text-mode handle opened above breaks numpy.load on Python 3
x = numpy.load('AAPL.npy')  # let numpy open the file itself, in binary mode
print(x)
print('one_result', x[0])
| [
"[email protected]"
] | |
4dcda266d8a1504833b25c78716a5a608cb703b3 | 4c8228b9a5720e1159233d79f463645ffad2eb06 | /notification/urls.py | b876c19f8a259655cf3ca99c0a0233734aae5c03 | [] | no_license | prateek789/Device-Management | 58e7eab68614eca3aea49228e6914828871e0bc6 | 0c33134ba0836624118ef3c3c21455f2c1b262ab | refs/heads/master | 2021-01-20T02:42:04.694139 | 2017-04-27T09:05:33 | 2017-04-27T09:05:33 | 88,947,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | from django.conf.urls import url
from notification import views
urlpatterns = [
url(r'^get-notification',views.get_notification,name='get-notification'),
url(r'^desktop-notification',views.desktop_notification,name='desktop-notification'),
url(r'^read-notification',views.read_notification,name='read-notification'),
] | [
"[email protected]"
] | |
3a6ccd4f7a0edffa6f93e9687c076417d0a1b0d7 | 7b5828edda7751700ca7002b40a214e39e5f48a8 | /EA/simulation/__hooks__.py | 85e30235b7cb2d5c0d2088433d93d3f9f0f6c835 | [] | no_license | daniela-venuta/Sims-4-Python-Script-Workspace | 54c33dac02f84daed66f46b7307f222fede0fa62 | f408b28fb34626b2e3b2953152343d591a328d66 | refs/heads/main | 2023-03-29T18:08:39.202803 | 2021-03-30T19:00:42 | 2021-03-30T19:00:42 | 353,111,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | RELOADER_ENABLED = False
__enable_gc_callback = True
import gc
try:
import _profile
except:
__enable_gc_callback = False
def system_init(gameplay):
import sims4.importer
sims4.importer.enable()
print('Server Startup')
if __enable_gc_callback:
gc.callbacks.append(_profile.notify_gc_function)
def system_shutdown():
global RELOADER_ENABLED
import sims4.importer
sims4.importer.disable()
RELOADER_ENABLED = False
| [
"[email protected]"
] | |
1f57a361859833b5623a103f29d1b18d051e101f | 05bec8583ff39d864557ea5df51a6ec2d14ede03 | /ms/accounts/migrations/0012_auto__add_field_myprofile_revenue__add_field_myprofile_balance.py | c0319c1d1e8ed753690eb13c4d497571ee22b0bb | [
"MIT"
] | permissive | joeyjy/ayi-django | 7a0164a61b24393c4cf132bf1abc9cd6032baa68 | de72f82a3754a56dc77c21501baa5809b6410057 | refs/heads/master | 2016-09-06T10:23:45.131049 | 2015-02-03T09:52:44 | 2015-02-03T09:52:44 | 21,931,383 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,935 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MyProfile.revenue'
db.add_column(u'accounts_myprofile', 'revenue',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'MyProfile.balance'
db.add_column(u'accounts_myprofile', 'balance',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MyProfile.revenue'
db.delete_column(u'accounts_myprofile', 'revenue')
# Deleting field 'MyProfile.balance'
db.delete_column(u'accounts_myprofile', 'balance')
models = {
u'accounts.ayi': {
'Meta': {'ordering': "['name']", 'object_name': 'Ayi'},
'area': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'home': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mobile': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pic': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'rate': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'street_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'work_place': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'})
},
u'accounts.compound': {
'Meta': {'ordering': "['name']", 'object_name': 'Compound'},
'cross_street': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'district': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'street_address': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'street_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
u'accounts.myprofile': {
'Meta': {'object_name': 'MyProfile'},
'apt_num': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'area': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'balance': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'bldg_num': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'compound': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.Compound']", 'null': 'True', 'blank': 'True'}),
'cross': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.BigIntegerField', [], {}),
'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
'revenue': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'street_num': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'my_profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts'] | [
"[email protected]"
] | |
f83d582584b1f3898d024e48b6fb7fe03a1db33f | fdab0c18eab28477d0980723c5ac5b4ba10c506f | /shelf/__init__.py | 7a691c178c4a1e6817a33c9fb6cc581431763af4 | [
"MIT"
] | permissive | MIT-Informatics/PreservationSimulation | 58b53595841c39e1fe00a05241be43ed0bcf6430 | 38c6641a25108022ce8f225a352f566ad007b0f3 | refs/heads/master | 2021-08-25T10:35:46.066554 | 2021-08-24T20:17:13 | 2021-08-24T20:17:13 | 17,369,426 | 9 | 0 | NOASSERTION | 2021-03-20T02:55:37 | 2014-03-03T15:03:30 | R | UTF-8 | Python | false | false | 30 | py | # Nothing to say here, yet.
| [
"[email protected]"
] | |
3d248b9822e566b434bc50291ba5c73e7f9d7aa3 | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/aio/operations/_virtual_machine_run_commands_operations.py | ae68dd96f544a951679e1e4b41833dc0b708fe85 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 7,966 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineRunCommandsOperations:
"""VirtualMachineRunCommandsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["models.RunCommandListResult"]:
"""Lists all available run commands for a subscription in a location.
:param location: The location upon which run commands is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RunCommandListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2018_06_01.models.RunCommandListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RunCommandListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RunCommandListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands'} # type: ignore
async def get(
self,
location: str,
command_id: str,
**kwargs
) -> "models.RunCommandDocument":
"""Gets specific run command for a subscription in a location.
:param location: The location upon which run commands is queried.
:type location: str
:param command_id: The command id.
:type command_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RunCommandDocument, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_06_01.models.RunCommandDocument
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RunCommandDocument"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'commandId': self._serialize.url("command_id", command_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RunCommandDocument', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands/{commandId}'} # type: ignore
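# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file). The operations class
# above is reached through a client object rather than instantiated directly;
# the import paths and credential class below are assumptions based on the
# usual layout of this package, and the subscription id is a placeholder.
#
# import asyncio
# from azure.identity.aio import DefaultAzureCredential
# from azure.mgmt.compute.v2018_06_01.aio import ComputeManagementClient
#
# async def show_run_commands():
#     async with ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#         async for command in client.virtual_machine_run_commands.list(location="westus2"):
#             print(command)
# asyncio.run(show_run_commands())
# ---------------------------------------------------------------------------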
| [
"[email protected]"
] | |
19effaf2fd28cbfbcc5bf1197122f93d208d746b | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-res/huaweicloudsdkres/v1/model/show_res_datasource_request.py | 174e9c77405184d91a01ce3c43989fd6fba03d2b | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,956 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowResDatasourceRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'content_type': 'str',
'workspace_id': 'str',
'datasource_id': 'str'
}
attribute_map = {
'content_type': 'Content-Type',
'workspace_id': 'workspace_id',
'datasource_id': 'datasource_id'
}
def __init__(self, content_type=None, workspace_id=None, datasource_id=None):
"""ShowResDatasourceRequest
The model defined in huaweicloud sdk
        :param content_type: Content type; the value is application/json.
        :type content_type: str
        :param workspace_id: Workspace ID.
        :type workspace_id: str
        :param datasource_id: Data source ID.
:type datasource_id: str
"""
self._content_type = None
self._workspace_id = None
self._datasource_id = None
self.discriminator = None
self.content_type = content_type
self.workspace_id = workspace_id
self.datasource_id = datasource_id
@property
def content_type(self):
"""Gets the content_type of this ShowResDatasourceRequest.
        Content type; the value is application/json.
:return: The content_type of this ShowResDatasourceRequest.
:rtype: str
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""Sets the content_type of this ShowResDatasourceRequest.
        Content type; the value is application/json.
:param content_type: The content_type of this ShowResDatasourceRequest.
:type content_type: str
"""
self._content_type = content_type
@property
def workspace_id(self):
"""Gets the workspace_id of this ShowResDatasourceRequest.
        Workspace ID.
:return: The workspace_id of this ShowResDatasourceRequest.
:rtype: str
"""
return self._workspace_id
@workspace_id.setter
def workspace_id(self, workspace_id):
"""Sets the workspace_id of this ShowResDatasourceRequest.
        Workspace ID.
:param workspace_id: The workspace_id of this ShowResDatasourceRequest.
:type workspace_id: str
"""
self._workspace_id = workspace_id
@property
def datasource_id(self):
"""Gets the datasource_id of this ShowResDatasourceRequest.
        Data source ID.
:return: The datasource_id of this ShowResDatasourceRequest.
:rtype: str
"""
return self._datasource_id
@datasource_id.setter
def datasource_id(self, datasource_id):
"""Sets the datasource_id of this ShowResDatasourceRequest.
        Data source ID.
:param datasource_id: The datasource_id of this ShowResDatasourceRequest.
:type datasource_id: str
"""
self._datasource_id = datasource_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowResDatasourceRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
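# Hedged usage sketch (not part of the generated model; ids are placeholders).
# The request object is normally passed to the service client's corresponding
# show call, but it can also be built and inspected standalone:
#
# request = ShowResDatasourceRequest(content_type="application/json",
#                                    workspace_id="<workspace-id>",
#                                    datasource_id="<datasource-id>")
# print(request)  # __repr__ -> to_str() -> to_dict(), all defined above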
| [
"[email protected]"
] | |
cb5b9d62e05d98b76c67688a3d75d3244e7b7adb | 6436424c051740418a7f7fbf1d1c9eecbebd6076 | /example/pull_data.py | 4fc471b57683bfb5245e81c001231004df7e197d | [] | no_license | sweetaz/ClientSDK | d0cc4e0facb34af5de15d7f1e17ecc95bc49d44d | 774793b0ddf660a6be9710c4d3ef81e59dd66236 | refs/heads/master | 2022-08-01T02:04:10.035693 | 2020-05-18T23:02:41 | 2020-05-18T23:02:41 | 267,210,826 | 0 | 0 | null | 2020-05-27T03:25:02 | 2020-05-27T03:25:01 | null | UTF-8 | Python | false | false | 1,196 | py | # pylint: disable=W0703
import sys
from thematic_client_sdk import Auth, ThematicClient
def main():
# get token and args from command line
if len(sys.argv) < 3:
print("Usage: "+sys.argv[0]+' <refresh_token> <survey_id> <result_id>')
exit()
refresh_token = sys.argv[1]
survey_id = sys.argv[2]
result_id = None
output_format = None
if len(sys.argv) > 3:
output_format = sys.argv[3]
# swap token for an access token
auth = Auth()
access_token = auth.swap_refresh_token(refresh_token)
    # create a client and download the data
client = ThematicClient(access_token)
# get the processed file
try:
save_location = str(survey_id)+'_'+str(result_id)
client.data.download_data(save_location+'_processed.csv',
survey_id,
result_id=result_id,
output_format=output_format)
client.data.download_themes(save_location+'_processed.json', survey_id, result_id=result_id)
except Exception as exc:
print("Failed to get results: "+str(exc))
exit()
if __name__ == "__main__":
main()
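# Example invocation (hypothetical values; the optional third argument selects
# the output format):
#   python pull_data.py <refresh_token> srv_12345 csv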
| [
"[email protected]"
] | |
ea826e75e6998d1dcfa8a8729f39b309e8af21ba | 4a83e737212f0c5f83e13be33deeeec1a8a0f4f3 | /posts/models.py | c1da3670d120cf3d154b3b10fc97e667b2374f0a | [] | no_license | 4dragunov/praktikum_social_network | c83e6a9d1eea1d0980f315abdc6df3e68dbdcef3 | c5a521df9bd5641ec2946edaf4fdc0922f32accb | refs/heads/master | 2023-02-13T20:47:08.787317 | 2021-01-12T16:53:14 | 2021-01-12T16:53:14 | 288,222,320 | 3 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
class Group(models.Model):
title = models.CharField(max_length=200)
description = models.TextField()
slug = models.SlugField(max_length=50, unique=True)
def __str__(self):
return self.title
class Post(models.Model):
text = models.TextField()
pub_date = models.DateTimeField("date published", auto_now_add=True)
author = models.ForeignKey(User, on_delete=models.CASCADE,
related_name="posts")
group = models.ForeignKey(Group, related_name="group",
on_delete=models.SET_NULL, max_length=100,
blank=True, null=True)
image = models.ImageField(upload_to='posts/', blank=True, null=True)
class Meta:
ordering = ["-pub_date"]
def __str__(self):
return self.text
class Comment(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE,
related_name='comments', null=True)
author = models.ForeignKey(User, on_delete=models.CASCADE,
related_name="comments", null=True)
text = models.TextField()
created = models.DateTimeField(auto_now_add=True)
    class Meta:
        # unique_together only takes effect when declared on Meta
        unique_together = ["post", "author"]
class Follow(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE,
related_name="follower")
author = models.ForeignKey(User, on_delete=models.CASCADE,
related_name="following")
| [
"[email protected]"
] | |
70c3c0d93eb896c3ee369ec5553227a67a88c379 | d2a030f7a050a641fddd657e895651ee0310ae41 | /givers/migrations/0005_auto_20210907_2029.py | 171e7f8a50f139766839d1879f38085452c55ea8 | [] | no_license | Shawen17/Giveawaynow | f052a1055a96f2d0a392aaf748adcafbec2a5135 | 92f3bc0b359a712776661348e239b492894b81a1 | refs/heads/master | 2023-09-05T00:28:59.237486 | 2021-10-24T21:12:37 | 2021-10-24T21:12:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | # Generated by Django 3.1.13 on 2021-09-07 19:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('givers', '0004_vendor_vendor_name'),
]
operations = [
migrations.AddField(
model_name='vendor',
name='profile',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='profile', to='givers.profile'),
preserve_default=False,
),
migrations.AddField(
model_name='vendor',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='user', to='auth.user'),
preserve_default=False,
),
migrations.AlterField(
model_name='received',
name='gift_requested',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='commodity', to='givers.give'),
),
migrations.AlterField(
model_name='received',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='person', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='vendor',
name='gift',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='gift', to='givers.give'),
),
]
| [
"[email protected]"
] | |
4f7b4debed8c09169aca4546cfce15ecead65567 | 3473f99b1832e261d1d98ef5a683aff2526b979f | /Binder/discussions/migrations/0016_notifications1.py | 03b01b76fcad3d85b02eee1454d9d8d6e0c7ed4c | [
"MIT"
] | permissive | pavan71198/dashboard_IITG | ae8ac224de327d53f680dc76a07c0116ebd21d4a | 28bc053eb5f8c81b1877796c7a2f960a7764dbb4 | refs/heads/master | 2020-03-24T01:30:36.010276 | 2018-03-07T09:56:35 | 2018-03-07T09:56:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10a1 on 2016-07-07 12:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('discussions', '0015_auto_20160627_1450'),
]
operations = [
migrations.CreateModel(
name='notifications1',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=120, null=True)),
('writer', models.CharField(max_length=120, null=True)),
('message_id', models.PositiveIntegerField(null=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('creator', models.CharField(blank=True, max_length=120, null=True)),
],
),
]
| [
"[email protected]"
] | |
f1ed979f7f03e65b62d9aaaf4f00cfeb7810bf4c | 17b0e3401dd87d936a8d9f76f3415bef9760741f | /survey_app/choose-3/utils.py | ade77ccb1b252cebd418e3aa83da715a50e7fe25 | [] | no_license | ahare63/categorical-to-latent | 6d95d133e1305ef20947554847bfb1f8b0dafec5 | 18789af26e7e9f5d46feed663baeea3a1c4a5a55 | refs/heads/master | 2023-01-22T19:52:52.573417 | 2020-12-02T22:14:06 | 2020-12-02T22:14:06 | 302,120,024 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,691 | py | import glob
import json
# Go through all responses and condense results into single output
def get_response_results(include_never=True):
with open("results_template.json", 'r') as f:
summary = json.load(f)
# Iterate through each response
files = glob.glob("./responses/*.json")
for f in files:
with open(f, 'r') as resp_file:
resp = json.load(resp_file)
if resp["survey_type"] == "choose-3":
# Top-level questions
summary["num_responses"] += 1
summary["major"][resp["major"]] += 1
summary["frequency"][resp["frequency"]] += 1
summary["adoption"][resp["adoption"]] += 1
summary["level"][resp["level"]] += 1
if not include_never and resp["frequency"] == "A":
continue
# Responses for each comparison
for r in resp["query_responses"]:
if "weighted_set_cover" in r["option_to_model"].values():
summary["num_answered_weighted"] += 1
for inc in r["result"]:
model = r["option_to_model"][inc]
summary[model]["included_count_weighted"] += 1
else:
summary["num_answered_unweighted"] += 1
for inc in r["result"]:
model = r["option_to_model"][inc]
summary[model]["included_count_unweighted"] += 1
# Get averages and win percentages
for key in ["set_cover", "weighted_set_cover", "embedding_average", "word_movers_distance", "jaccard", "edit_distance"]:
if key != "set_cover":
summary[key]["included_pct_weighted"] = round(summary[key]["included_count_weighted"]/summary["num_answered_weighted"], 2)
if key != "weighted_set_cover":
summary[key]["included_pct_unweighted"] = round(summary[key]["included_count_unweighted"]/summary["num_answered_unweighted"], 2)
# Save results
with open("./results.json", 'w') as f:
json.dump(summary, f, indent=2)
# Take the data in old_file, fill in any entries missing from new_file, and save the merged result as new_file
def update_database(old_file, new_file):
with open(old_file, 'r') as f:
old = json.load(f)
with open(new_file, 'r') as f:
new = json.load(f)
for key in new.keys():
new_dict = new[key]
old_dict = old[key]
for k in old_dict.keys():
if k not in new_dict:
new_dict[k] = old_dict[k]
with open(new_file, 'w') as f:
json.dump(new, f, indent=2)
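# Example (hypothetical filenames): merge entries from an older results file
# into the current one before regenerating the summary.
#   update_database("results_old.json", "results.json")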
if __name__ == "__main__":
get_response_results() | [
"[email protected]"
] | |
426a4d1815fa28c588b242e1acdb46bcae50ea1b | 7987490ca883c62e0172a18fa4e1c9267a96357c | /app/common/__init__.py | b14338470f87cdd4a5326a411a92ebca41588483 | [] | no_license | kulongwangzhi85/web-python-flask | 294b4f4c217ba829cf94c765def23cbc5adf36d9 | 1766a056a7d6b9f4f5110f9bbc0d76eeb6c676f2 | refs/heads/master | 2020-07-05T11:09:06.805755 | 2016-09-17T08:36:53 | 2016-09-17T08:36:53 | 67,272,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py | import views
import models
import forms | [
"[email protected]"
] | |
851ad067a6a1447415a84ba38c63afa33fab9098 | ea0d8dc86b07c802769d74dc38d97154a79c1dcf | /window-introduction.py | c8b52caf7711f6339afae5e9ea31c75dda603f10 | [] | no_license | aachellis/Spark-Join-Window_Introduction | a0d33934f23f7b11e5dcb70a1087adba58daed6a | cb237cbc152f2deec9ffe8d034550fea8d8e9e31 | refs/heads/master | 2022-12-08T21:03:34.607144 | 2020-08-04T03:27:59 | 2020-08-04T03:27:59 | 284,867,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py | '''
At its core, a window function calculates a return value for every input row of a table based on a group of rows, called the Frame. Every input row can have a unique frame associated with it. This characteristic of window functions makes them more powerful than other functions and allows users to express various data processing tasks that are hard (if not impossible) to be expressed without window functions in a concise way.
Window functions allow users of Spark SQL to calculate results such as the rank of a given row or a moving average over a range of input rows.
'''
#Importing necessary functions
from pyspark.sql import SparkSession
from pyspark.sql.window import Window
from pyspark.sql.functions import col,row_number,dense_rank,max
#Creating Spark SQL session and reading from a CSV File
spark = SparkSession.builder.master("local").config("spark.sql.warehouse.dir","c:\\tmp").appName("Window function Introduction").getOrCreate()
df = spark.read.csv("product_revenue.csv", inferSchema = True, header = True)
#Answering the question: "What are the best-selling and the second best-selling products in every category?"
win_func = Window.partitionBy("category").orderBy(df["revenue"].desc())
df_first = df.withColumn("rank",dense_rank().over(win_func))
df_first.where(col("rank") <= 2).orderBy(col("rank").asc()).select("product","category","revenue").show()
#Answering the question: "What is the difference between the revenue of each product and the revenue of the best selling product in the same category as that product?"
win_func_max = Window.partitionBy("category").orderBy(df["revenue"].desc())
df_max = df.withColumn("revenue_diff",max(df["revenue"]).over(win_func_max)-df["revenue"])
df_max.show()
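#A third illustration (sketch; reuses the DataFrame above): row_number(), already imported, breaks ties arbitrarily,
#so "top 1 per category" keeps exactly one row even when two products share the same revenue
win_func_rn = Window.partitionBy("category").orderBy(df["revenue"].desc())
df_top1 = df.withColumn("row", row_number().over(win_func_rn))
df_top1.where(col("row") == 1).select("product","category","revenue").show()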
| [
"[email protected]"
] | |
0235d2d74cea99b568c3290179d49bebc8256fdc | 94829120ea91ee9e8a9c364ae52c7b9f373656c1 | /src/converters/rosbagConverter.py | 79dedb35d5ade3db221e39a6f5976fd69d0c39ef | [] | no_license | asdewar/event_conversor | e27513ffda764459bc6ec65155a0e1d80d1e03a6 | 596fd63c9e4dc93684f551baf3f4f596ef84d83f | refs/heads/master | 2023-07-11T10:07:33.744410 | 2021-07-12T14:11:48 | 2021-07-12T14:11:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | from src.utils.utils import combine, getNumProgress
from src.format.bag.EventRosbag import Event as _Event
from src.format.EventClass import Event
from src.config.config import Config
from src.ui.UI import UI
import rosbag
import rospy
def rosbagToAbstract(input_file):
UI().objectUI.showMessage("Starting to read bag file", "w")
bag = rosbag.Bag(input_file)
c = Config()
if c.rosbag:
topic = c.config_data["rosbag"]["topic"]
else:
topics = bag.get_type_and_topic_info().topics
topic = UI().objectUI.chooseWindow("Which is the topic that contains the events?: ", topics)
event_list = []
num_progress = getNumProgress(bag.get_message_count(topic))
i = 0
for topic, msg, t in bag.read_messages(topics=topic):
if i % num_progress == 0:
UI().objectUI.sumProgress()
i += 1
aux_list = []
if "EventArray" in str(type(msg)): # msg._type
aux_list = msg.events
else:
aux_list.append(msg)
for event in aux_list:
event_list.append(Event(
event.x,
event.y,
event.polarity,
combine(event.ts.secs, event.ts.nsecs)
))
bag.close()
UI().objectUI.sumProgress(True)
UI().objectUI.showMessage("Finishing reading the bag file", "c")
return event_list
def abstractToRosbag(event_list, output_file):
UI().objectUI.showMessage("Starting to write bag file", "w")
bag = rosbag.Bag(output_file, "w")
c = Config()
if c.rosbag:
topic = c.config_data["rosbag"]["topic"]
else:
        topic = UI().objectUI.simpleInput("Enter the name of the topic where the events are going to be written: ")
num_progress = getNumProgress(len(event_list))
for i, event in enumerate(event_list):
if i % num_progress == 0:
UI().objectUI.sumProgress()
e = _Event()
e.x = event.x
e.y = event.y
e.polarity = event.pol
e.ts = rospy.Time.from_sec(event.ts)
bag.write(topic, e)
UI().objectUI.sumProgress(True)
bag.close()
UI().objectUI.showMessage("Finishing writing the bag file", "c")
| [
"[email protected]"
] | |
a69501bb9f9d0f85d61b8e4912356da88c453e3b | 2f2fb10563b48675e5eb81b146a2338cc19dc447 | /nets/vgg.py | 0cfacdebd317ed366c69eef94c74e6bec8845a7d | [
"MIT"
] | permissive | longzhen520/Tensor_Layer_for_Deep_Neural_Network_Compression | d18c2644c0ef7d78f7b4ce4006429c4ff1f1e1b7 | 2fe88a989501e4c1f5e17d05873efe6906f45c55 | refs/heads/master | 2023-01-15T08:07:26.950156 | 2020-11-26T15:26:50 | 2020-11-26T15:26:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,725 | py | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.optim.lr_scheduler import StepLR
import torchvision
import torchvision.transforms as transforms
from torchvision import models
cfg = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
def __init__(self, vgg_name):
super(VGG, self).__init__()
self.features = self._make_layers(cfg[vgg_name])
self.classifier = nn.Sequential(
nn.Dropout(0.25),
nn.Linear(512, 128),
nn.ReLU(inplace=True),
nn.Dropout(0.25),
nn.Linear(128, 10),
)
def forward(self, x):
out = self.features(x)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _make_layers(self, cfg):
layers = []
in_channels = 3
for x in cfg:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers) | [
"[email protected]"
] | |
3246ee08ccb18b69003effd0ef68bc968dc2dc72 | 6ea7ed159566fa290dbdf1524bba3ceec9c2da6a | /recipe_scraper/models.py | 5355e123eec8721924911dc52c29f4f7a29d6b16 | [] | no_license | jeffrey-hung/recipe-scraper | 9215cc23275d64d9262a98a130269c01acf7b1ac | 7b2073b70d5d7434501e8d84037da239087c902e | refs/heads/master | 2022-11-25T18:14:38.889719 | 2019-11-29T06:46:33 | 2019-11-29T06:46:33 | 224,792,882 | 0 | 0 | null | 2022-11-22T04:51:46 | 2019-11-29T06:42:03 | JavaScript | UTF-8 | Python | false | false | 1,694 | py | from django.db import models
from django.contrib.auth import get_user_model
# Create your models here.
class Ingredient(models.Model):
name = models.CharField(
max_length=200,
primary_key=True,
)
def __str__(self):
return self.name
class Recipe(models.Model):
name = models.CharField(
max_length=300,
)
description = models.TextField()
image = models.ImageField(
upload_to = "recipe_images",
default=None,
blank=True,
null=True,
)
def __str__(self):
return self.name
class RecipeIngredient(models.Model):
recipe = models.ForeignKey(
'Recipe',
on_delete=models.CASCADE,
)
ingredient = models.ForeignKey(
'Ingredient',
on_delete=models.CASCADE,
)
quantity = models.CharField(max_length=50)
class UserIngredient(models.Model):
user = models.ForeignKey(
get_user_model(),
on_delete=models.CASCADE,
)
ingredient = models.ForeignKey(
'Ingredient',
on_delete=models.CASCADE,
)
class FavouriteRecipe(models.Model):
user = models.ForeignKey(
get_user_model(),
on_delete=models.CASCADE
)
recipe = models.ForeignKey(
'Recipe',
on_delete=models.CASCADE
)
class Tag(models.Model):
name = models.CharField(
max_length=60,
primary_key=True,
)
def __str__(self):
return self.name
class RecipeTag(models.Model):
recipe = models.ForeignKey(
'Recipe',
on_delete=models.CASCADE,
)
tag = models.ForeignKey(
'Tag',
on_delete=models.CASCADE
) | [
"[email protected]"
] | |
8434e794040a56ddfae1683df8ef406eb49ca681 | ea43ceb244b86f73fd8cee09da3a4b3d86f4100d | /SeamCarvingReduceWidth.py | cec01c60fc8826e9f93a349c923461513e366781 | [] | no_license | SrinidhiPalwayi/ps2 | 0cdf6786f4cfe1e7bc23e27e29e09fa66719409d | c8920888b9e83996da2865af01dbc9955dcaae77 | refs/heads/master | 2020-03-30T07:06:52.738058 | 2018-10-02T23:30:31 | 2018-10-02T23:30:31 | 150,916,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | from cumulative_minimum_energy_map import cumulative_minimum_energy_map
from find_optimal_vertical_seam import find_optimal_vertical_seam
from energy_image import energy_image
from reduceWidth import reduceWidth
import matplotlib.pylab as plt
import numpy as np
import scipy.misc
image = 'inputSeamCarvingPrague.jpg'
ei = energy_image(image)
scipy.misc.imsave('outputEnergyPrague.png', ei)
image_array = np.asarray(plt.imread(image))
for i in range(0,100):
image_array, ei = reduceWidth(image_array, ei)
scipy.misc.imsave('outputEnergyReduceWidthPrague.png', ei)
scipy.misc.imsave('outputReduceWidthPrague.png', image_array)
image = 'inputSeamCarvingMall.jpg'
ei = energy_image(image)
image_array = np.asarray(plt.imread(image))
for i in range(0,100):
image_array, ei = reduceWidth(image_array, ei)
scipy.misc.imsave('outputReduceWidthMall.png', image_array) | [
"[email protected]"
] | |
e7b6cc2bcf1753ac1733d91128525009149d7115 | fc9f070fcff2cee6d3acc7111c3c83c2c4652911 | /blog/views.py | e464c8fbb592351c7ff248aff9831e1a4dafda7b | [] | no_license | hopgausi/Blog | 26e5a8f6f95e18c39e0551832eb4fbd604141c87 | 43309c18e81d7ae3cb18be99fb7fd7aafa5d525f | refs/heads/master | 2023-03-20T02:57:56.642381 | 2021-03-07T14:48:15 | 2021-03-07T14:48:15 | 289,376,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,439 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib import messages
from django.utils.text import slugify
from django.views import generic
from .models import Article
from .forms import ArticleForm
class IndexView(generic.ListView):
model = Article
template_name = 'blog/index.html'
context_object_name = 'articles'
def get_queryset(self):
articles = Article.objects.filter(publish=True)[:5]
return articles
class ArticleDetailView(generic.DetailView):
template_name = 'blog/detail.html'
model = Article
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["title"] = 'title'
return context
@login_required
def create_post(request):
if request.method == 'POST':
form = ArticleForm(request.POST)
if form.is_valid():
#get logged in user
post_author = request.user
#slugify post title
post_title = slugify(form.cleaned_data.get('title'))
            # save the post without committing to db
post = form.save(commit=False)
            # assign the slug and author to the Article instance
post.slug = post_title
post.author = post_author
# save the post
post.save()
messages.success(request, 'Your Post Has been created!')
return redirect('blog:home')
else:
form = ArticleForm()
context = {
'form': form,
}
template_name = 'blog/create_post.html'
return render(request, template_name, context)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, generic.UpdateView):
model = Article
form_class = ArticleForm
template_name = 'blog/create_post.html'
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
else:
return False
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, generic.DeleteView):
model = Article
success_url = '/'
template_name = 'blog/delete_post.html'
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
else:
return False | [
"[email protected]"
] | |
8600e3c9ea89535253035fff93316cd1d4f2aaa2 | e4384d6231d93171141b6e5023a31c01f3e2b5cc | /populate_rango.py | c592df4f8b332fe5b23210e19ea3cded5ef33816 | [] | no_license | lumaen/tango_with_django_project | 651a588dc1140c5005bf1976cce23c7af58663df | a56bbb9f1d317701cb42a4599cb3672b9b93b9d7 | refs/heads/main | 2023-03-02T16:12:20.961986 | 2021-02-05T14:44:58 | 2021-02-05T14:44:58 | 329,926,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,612 | py | # This script automatically populates the rango database
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'tango_with_django_project.settings')
import django
django.setup()
from rango.models import Category, Page
# Actual method to populate the database
def populate():
# Lists of dictionaries containing the Pages
# Each list is a Category
python_pages = [
{'title': 'Official Python Tutorial',
'url':'http://docs.python.org/3/tutorial/',
'views': 54},
{'title':'How to Think like a Computer Scientist',
'url':'http://www.greenteapress.com/thinkpython/',
'views': 74},
{'title':'Learn Python in 10 Minutes',
'url':'http://www.korokithakis.net/tutorials/python/',
'views': 25} ]
django_pages = [
{'title':'Official Django Tutorial',
'url':'https://docs.djangoproject.com/en/2.1/intro/tutorial01/',
'views': 44},
{'title':'Django Rocks',
'url':'http://www.djangorocks.com/',
'views': 15},
{'title':'How to Tango with Django',
'url':'http://www.tangowithdjango.com/',
'views': 57} ]
other_pages = [
{'title':'Bottle',
'url':'http://bottlepy.org/docs/dev/',
'views': 85},
{'title':'Flask',
'url':'http://flask.pocoo.org',
'views': 78} ]
# Dictionary containing the name of the Categories and the respective Pages
cats = {'Python': {'pages': python_pages, 'views': 128, 'likes': 64},
'Django': {'pages': django_pages, 'views': 64, 'likes': 32},
'Other Frameworks': {'pages': other_pages, 'views': 32, 'likes': 16} }
# Add Categories and Pages
for cat, cat_data in cats.items():
c = add_cat(cat, cat_data['views'], cat_data['likes'])
for p in cat_data['pages']:
add_page(c, p['title'], p['url'], p['views'])
# Print out the Categories added
for c in Category.objects.all():
for p in Page.objects.filter(category=c):
print(f'- {c}: {p}')
# Method to add the Category
def add_cat(name, views, likes):
c = Category.objects.get_or_create(name=name)[0]
c.views = views
c.likes = likes
c.save()
return c
# Method to add the Page
def add_page(cat, title, url, views):
p = Page.objects.get_or_create(category=cat, title=title)[0]
p.url = url
p.views = views
p.save()
return p
# Start the execution
if __name__ == '__main__':
print('Starting Rango population script...')
populate()
| [
"[email protected]"
] | |
9f91ebdd34a36490f95c4dd170707f89b982530a | 122280a785fe08f6f520ad510068c584bc216a79 | /src/lr_schedulers.py | ba5bea2bf04ed1c6f8b8ce0b1378c0a024596807 | [] | no_license | AniketGurav/forensic-writer-identification-and-writer-retrieval-using-a-vision-transformer | 9824cc68854f8c4c5600e50272663e422d4ccc83 | 74421c74782cd03725f315f42b310e33afbda921 | refs/heads/main | 2023-09-04T22:48:02.753712 | 2021-11-14T13:12:01 | 2021-11-14T13:12:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | class WarmUpLR:
"""Learning rate warmup scheduler as used by [1] (implementation: [2]).
The learning rate is warmed up by dividing the initial learning rate by (`num_epochs_warm_up` - i),
where i is the current epoch (0 based indexing). After the learning rate was warmed up for `num_epochs_warm_up`
epochs,
it stays the same.
Example: `num_epochs_warm_up` = 5, lr ... learning rate
1. Epoch: lr = initial lr / (5 - 0) = initial lr / 5
2. Epoch: lr = initial lr / (5 - 1) = initial lr / 4
3. Epoch: lr = initial lr / (5 - 2) = initial lr / 3
4. Epoch: lr = initial lr / (5 - 3) = initial lr / 2
>= 5. Epoch: lr = initial lr / (5 - 4) = initial lr / 1 = initial lr
Note: A warm up learning rate scheduler will be implemented in PyTorch in a future release (see [3]).
References:
[1] A. Hassani, S. Walton, N. Shah, A. Abuduweili, J. Li, and H. Shi, ‘Escaping the Big Data Paradigm with
Compact Transformers’, arXiv:2104.05704 [cs], Jun. 2021, Accessed: Jul. 19, 2021. [Online]. Available:
http://arxiv.org/abs/2104.05704
[2] https://github.com/SHI-Labs/Compact-Transformers/blob/e7fe3532dd17c4dafd5afae32082e96c4bf780b3/main.py#L186,
Accessed: 2021-08-22
[3] https://github.com/pytorch/pytorch/pull/60836, Accessed: 2021-08-22
"""
def __init__(self, optimizer, initial_lr, num_epochs_warm_up=10):
"""
Args:
optimizer: The used optimizer
initial_lr: The initial learning rate
num_epochs_warm_up (optional): The number of epochs the learning rate should be warmed up
"""
self.optimizer = optimizer
self.initial_lr = initial_lr
self.num_epochs_warm_up = num_epochs_warm_up
self.last_epoch = 0
def step(self):
if self.last_epoch < self.num_epochs_warm_up:
lr = self.initial_lr / (self.num_epochs_warm_up - self.last_epoch)
else:
lr = self.initial_lr
self.last_epoch += 1
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
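if __name__ == "__main__":
    # Hedged usage sketch mirroring the docstring example above
    # (initial_lr=0.1, num_epochs_warm_up=5); the single dummy parameter
    # stands in for a real model's parameters.
    import torch
    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(params, lr=0.1)
    scheduler = WarmUpLR(optimizer, initial_lr=0.1, num_epochs_warm_up=5)
    for epoch in range(7):
        scheduler.step()
        print(epoch, optimizer.param_groups[0]["lr"])  # 0.02, 0.025, ..., 0.1, 0.1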
| [
"[email protected]"
] | |
75ca7486648bdecf2f8c0a230e5a1429072c4711 | 04b7663dceeab120224ba391c941a6498097cf1d | /profe-08-09-2021-2.py | 0862cbbd7c3c8ac466832247799e0a90c7f04451 | [] | no_license | TeacherAndres/Teinco-electivaTecnologica | 93217d58d95fdd6ddc027a40bc9957247ca5bf7e | 5c8606f83ad32f22e295e6c8464eaba9e0aff1c5 | refs/heads/master | 2023-08-13T08:35:30.365866 | 2021-09-22T03:32:10 | 2021-09-22T03:32:10 | 402,059,881 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,777 | py | #Definir y crear diccionario (perro)
perro = {'dueño': 'Federiko','raza':'canino','vacunas':['tosferina','rabia','cancha']}
#Print the perro dictionary
print(perro)
#Print a value by key
print(perro['dueño'])
#Print a value by key and by inner position
print(perro['vacunas'])
print(perro['vacunas'][0])
print(perro['vacunas'][2])
#ZIP
print("******************** ZIP ***************")
diccionariobasico = dict(zip('abcd',[1,2,3,4]))
print(diccionariobasico)
#KEYS
print("******************** KEYS ***************")
keys=perro.keys()
print(keys)
#VALUES
print("******************** VALUES ***************")
valores=perro.values()
print(valores)
#COPY
print("******************** COPY ***************")
perro2 = perro.copy()
print(perro2)
#FROMKEYS
print("******************** FROMKEYS ***************")
perro3 = perro2.fromkeys(['a','b','c','d','e'],1)
print(perro3)
#get()
print("******************** GET ***************")
valorI = perro2.get('dueño')
print(valorI)
#pop()
print("******************** POP ***************")
dic = {'a':1,'b':2,'c':3,'d':4}
print(dic)
dic.pop('b')
dic.pop('c')
print(dic)
#setdefault
print("******************** setdefault ***************")
dic = {'a':1,'b':2,'c':3,'d':4}
valor = dic.setdefault('a')
print(dic)
valor = dic.setdefault('e',['a','b','c'])
print(dic)
print(dic['e'][2])
#INSERT
print("******************** INSERT ***************")
datosUsr = []
print(datosUsr)
datosUsr.insert(0,'pepe')
datosUsr.insert(1,'majarrez')
datosUsr.insert(2,'limonche')
print(datosUsr)
#UPDATE
print("******************** UPDATE ***************")
dicI = {'a':1,'b':2,'c':3,'d':4}
discS = {'a':1,'b':2,'e':3,'f':4}
dicI.update(discS)
print(dicI)
dicU = {'valorUltimo':'este es'}  # renamed from "dict" to avoid shadowing the built-in
dicI.update(dicU)
print(dicI)
# SECOND WAY TO CREATE A DICTIONARY
"""
print("******************** CREAR DICCIONARIO SEGUNDA FORMA ***************")
d2 = dict([
('Nombre', 'Sara'),
('Edad', 27),
('Documento', 1003882),
])
print (d2)
"""
#FOR
print("******************** RECORRE DICCIONARIO ***************")
for x in perro:
print(perro[x])
#NEST DICTIONARIES
print("******************** ADICIONAR DICCIONARIO ***************")
dicI = {'a':1,'b':2,'c':3,'d':4}
discS = {'nombre':'Raul','apellido':'MElano','Telefono':312313,'direccion':'CALLE falas 123'}
a = {
"dicLEtras":dicI,
"dicDatosPEr":discS
}
print(a)
print("**:****************** ADICIONAR DICCIONARIO DENTRO DE DICCIONARIO ***************")
b = {
"dicDatosPerro":perro,
"dicCumpuestoI":a
}
print(b)
print(b.get('dicCumpuestoI'))
print(b['dicCumpuestoI']['dicDatosPEr']['direccion'])
i = b.get('dicCumpuestoI')
s = i.get('dicDatosPEr')
f = s.get('direccion')
print(f)
r=b.get('dicCumpuestoI').get('dicDatosPEr').get('direccion')
print(r)
# BREAK 9:15
| [
"[email protected]"
] | |
4949fecd8736ec02cd382e73f6d52b49b11481e7 | 9249947c07f8addf64dd3d2a2f9f37d379f83921 | /libs/paramiko/__init__.py | 9e2ba013c33361a03c18e2ef64327289c8e2581b | [
"MIT"
] | permissive | operepo/ope | eb71aa763d157416009d7c3052ace11852660e0a | 018c82af46845315795c67c36801e2a128f515d5 | refs/heads/master | 2023-08-08T15:05:28.592589 | 2023-07-25T00:22:24 | 2023-07-25T00:22:24 | 96,855,111 | 12 | 11 | MIT | 2023-03-03T15:10:34 | 2017-07-11T05:42:14 | Perl | UTF-8 | Python | false | false | 3,922 | py | # Copyright (C) 2003-2011 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import sys
from paramiko._version import __version__, __version_info__
if sys.version_info < (2, 6):
raise RuntimeError('You need Python 2.6+ for this module.')
__author__ = "Jeff Forcier <[email protected]>"
__license__ = "GNU Lesser General Public License (LGPL)"
from paramiko.transport import SecurityOptions, Transport
from paramiko.client import SSHClient, MissingHostKeyPolicy, AutoAddPolicy, RejectPolicy, WarningPolicy
from paramiko.auth_handler import AuthHandler
from paramiko.ssh_gss import GSSAuth, GSS_AUTH_AVAILABLE
from paramiko.channel import Channel, ChannelFile
from paramiko.ssh_exception import SSHException, PasswordRequiredException, \
BadAuthenticationType, ChannelException, BadHostKeyException, \
AuthenticationException, ProxyCommandFailure
from paramiko.server import ServerInterface, SubsystemHandler, InteractiveQuery
from paramiko.rsakey import RSAKey
from paramiko.dsskey import DSSKey
from paramiko.ecdsakey import ECDSAKey
from paramiko.sftp import SFTPError, BaseSFTP
from paramiko.sftp_client import SFTP, SFTPClient
from paramiko.sftp_server import SFTPServer
from paramiko.sftp_attr import SFTPAttributes
from paramiko.sftp_handle import SFTPHandle
from paramiko.sftp_si import SFTPServerInterface
from paramiko.sftp_file import SFTPFile
from paramiko.message import Message
from paramiko.packet import Packetizer
from paramiko.file import BufferedFile
from paramiko.agent import Agent, AgentKey
from paramiko.pkey import PKey
from paramiko.hostkeys import HostKeys
from paramiko.config import SSHConfig
from paramiko.proxy import ProxyCommand
from paramiko.common import AUTH_SUCCESSFUL, AUTH_PARTIALLY_SUCCESSFUL, AUTH_FAILED, \
OPEN_SUCCEEDED, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, OPEN_FAILED_CONNECT_FAILED, \
OPEN_FAILED_UNKNOWN_CHANNEL_TYPE, OPEN_FAILED_RESOURCE_SHORTAGE
from paramiko.sftp import SFTP_OK, SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE, \
SFTP_BAD_MESSAGE, SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST, SFTP_OP_UNSUPPORTED
from paramiko.common import io_sleep
__all__ = [ 'Transport',
'SSHClient',
'MissingHostKeyPolicy',
'AutoAddPolicy',
'RejectPolicy',
'WarningPolicy',
'SecurityOptions',
'SubsystemHandler',
'Channel',
'PKey',
'RSAKey',
'DSSKey',
'Message',
'SSHException',
'AuthenticationException',
'PasswordRequiredException',
'BadAuthenticationType',
'ChannelException',
'BadHostKeyException',
'ProxyCommand',
'ProxyCommandFailure',
'SFTP',
'SFTPFile',
'SFTPHandle',
'SFTPClient',
'SFTPServer',
'SFTPError',
'SFTPAttributes',
'SFTPServerInterface',
'ServerInterface',
'BufferedFile',
'Agent',
'AgentKey',
'HostKeys',
'SSHConfig',
'util',
'io_sleep' ]
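# Hedged usage sketch, kept as a comment so importing the package stays free
# of side effects (host and credentials are placeholders):
#
# client = SSHClient()
# client.set_missing_host_key_policy(AutoAddPolicy())
# client.connect("host.example.com", username="user", password="secret")
# stdin, stdout, stderr = client.exec_command("uname -a")
# print(stdout.read())
# client.close()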
| [
"[email protected]"
] | |
78dfc7827343c4caa8b548bc7ae34fd16337a447 | 76326907ce2dc3407e81c29f450492266d8a36a4 | /usuarios/forms.py | 4dcbac355139f46a42c362607a10894a99c7c247 | [] | no_license | embolatado/ffwapp | a193e2f0083fe728b61a4be4963bda9c9f998415 | 2533cb8474967994de00c2042a99da32fdd13eb6 | refs/heads/main | 2023-05-12T23:39:12.069268 | 2021-05-23T00:21:09 | 2021-05-23T00:21:09 | 365,068,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UsuarioRegistroForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
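# Hedged usage sketch (the view-side code is illustrative):
#
# form = UsuarioRegistroForm(request.POST)
# if form.is_valid():
#     user = form.save()  # persists the User with the validated password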
| [
"[email protected]"
] | |
f44312a56f753dec7e321a13f2d402666c08d473 | 779af874adf1647461981b0c36530cf9924f5f01 | /python3/dist-packages/plainbox/impl/exporter/text.py | c8889b30876cfff0a422e3a3d37debfa5f7bf396 | [] | no_license | hitsuyo/Library_Python_3.5 | 8974b5de04cb7780b0a1a75da5cb5478873f08e7 | 374e3f9443e4d5cae862fd9d81db8b61030ae172 | refs/heads/master | 2022-11-05T23:46:47.188553 | 2018-01-04T19:29:05 | 2018-01-04T19:29:05 | 116,093,537 | 1 | 2 | null | 2022-10-26T03:07:06 | 2018-01-03T05:02:20 | Python | UTF-8 | Python | false | false | 3,001 | py | # This file is part of Checkbox.
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <[email protected]>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.impl.exporter.text` -- plain text exporter
=========================================================
.. warning::
THIS MODULE DOES NOT HAVE STABLE PUBLIC API
"""
from plainbox.i18n import gettext as _
from plainbox.impl.color import Colorizer
from plainbox.impl.exporter import SessionStateExporterBase
from plainbox.impl.result import outcome_meta
class TextSessionStateExporter(SessionStateExporterBase):
"""Human-readable session state exporter."""
def __init__(self, option_list=None, color=None, exporter_unit=None):
super().__init__(option_list, exporter_unit=exporter_unit)
self.C = Colorizer(color)
def get_session_data_subset(self, session_manager):
return session_manager.state
def dump(self, session, stream):
for job in session.run_list:
state = session.job_state_map[job.id]
if state.result.is_hollow:
continue
if self.C.is_enabled:
stream.write(
" {}: {}\n".format(
self.C.custom(
outcome_meta(state.result.outcome).unicode_sigil,
outcome_meta(state.result.outcome).color_ansi
), state.job.tr_summary(),
).encode("UTF-8"))
if len(state.result_history) > 1:
stream.write(_(" history: {0}\n").format(
', '.join(
self.C.custom(
result.outcome_meta().tr_outcome,
result.outcome_meta().color_ansi)
for result in state.result_history)
).encode("UTF-8"))
else:
stream.write(
"{:^15}: {}\n".format(
state.result.tr_outcome(),
state.job.tr_summary(),
).encode("UTF-8"))
if state.result_history:
print(_("History:"), ', '.join(
self.C.custom(
result.outcome_meta().unicode_sigil,
result.outcome_meta().color_ansi)
for result in state.result_history))
| [
"[email protected]"
] | |
f20ebb6b95329c51dbc9d1fc3c6639b1ee3ec076 | 07d337a6e62d246ef36e62a72ca4ad403a40e960 | /Faster RCNN/utils/backbone_utils.py | fd9a365752196a40d8a57be9441449155e6389dd | [
"Apache-2.0"
] | permissive | Chrisa142857/PolarNet-GCdet | cff1ffa46e5b5a8446e6aec378a5834622e73028 | f706877c3ecac68c0cb03ef52065176837cb08e3 | refs/heads/main | 2023-05-22T23:58:23.544591 | 2022-07-03T13:04:01 | 2022-07-03T13:04:01 | 494,720,559 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,448 | py | #!/usr/bin/env python
# coding=utf-8
import torch.nn as nn
from torchvision.models import resnet
from torchvision.models import densenet
from .layergetter import IntermediateLayerGetter, DenseNetLayerGetter
from .fpn import FeaturePyramidNetwork, MaxpoolOnP5, Bottom_up_path
from .fpn import AttFeaturePyramidNetwork
from .fpn import PANModule
from .misc import FrozenBatchNorm2d
class BackboneWithFPN(nn.Module):
def __init__(self, backbone, return_layers,
in_channels_list, out_channel):
super(BackboneWithFPN, self).__init__()
self.body = IntermediateLayerGetter(backbone, return_layers)
self.fpn = FeaturePyramidNetwork(in_channels_list,
out_channel,
extra_block=MaxpoolOnP5())
# self.afpn = AttFeaturePyramidNetwork(in_channels_list,
# out_channel,
# extra_block=MaxpoolOnP5())
# self.pan = PANModule(in_channels_list,
# out_channel)
# self.bottom_up = Bottom_up_path([256,256,256,256],
# out_channel,
# extra_block=MaxpoolOnP5())
self.out_channels = out_channel
def forward(self, x):
x = self.body(x)
# x = self.afpn(x)
x = self.fpn(x)
# x = self.bottom_up(x)
return x
class BackboneWithAFPN(nn.Module):
def __init__(self, backbone, return_layers,
in_channels_list, out_channel):
super(BackboneWithAFPN, self).__init__()
self.body = IntermediateLayerGetter(backbone, return_layers)
# self.fpn = FeaturePyramidNetwork(in_channels_list,
# out_channel,
# extra_block=MaxpoolOnP5())
self.afpn = AttFeaturePyramidNetwork(in_channels_list,
out_channel,
extra_block=MaxpoolOnP5())
# self.pan = PANModule(in_channels_list,
# out_channel)
# self.bottom_up = Bottom_up_path([256,256,256,256],
# out_channel,
# extra_block=MaxpoolOnP5())
self.out_channels = out_channel
def forward(self, x):
x = self.body(x)
x = self.afpn(x)
# x = self.fpn(x)
# x = self.bottom_up(x)
return x
class BackboneWithFPNForDensenet(nn.Module):
def __init__(self, backbone, in_channels_list, out_channel):
super(BackboneWithFPNForDensenet, self).__init__()
self.body = DenseNetLayerGetter(backbone)
self.fpn = FeaturePyramidNetwork(in_channels_list,
out_channel,
extra_block=MaxpoolOnP5())
self.afpn = AttFeaturePyramidNetwork(in_channels_list,
out_channel,
extra_block=MaxpoolOnP5())
# self.pan = PANModule(in_channels_list,
# out_channel)
# self.bottom_up = Bottom_up_path([256,256,256,256],
# out_channel,
# extra_block=MaxpoolOnP5())
self.out_channels = out_channel
def forward(self, x):
x = self.body(x)
x = self.afpn(x)
# x = self.fpn(x)
# x = self.bottom_up(x)
return x
# def resnet_fpn_backbone(backbone_name, pretrained,
# norm_layer=FrozenBatchNorm2d):
# backbone = resnet.__dict__[backbone_name](
# pretrained=pretrained,
# norm_layer=norm_layer
# )
# for name, param in backbone.named_parameters():
# if "layer2" not in name and "layer3" not in name and "layer4" not in name:
# param.requires_grad_(False)
# return_layers = {"layer1": "0", "layer2": "1",
# "layer3": "2", "layer4": "3"}
# in_channels_stage2 = backbone.inplanes // 8
# in_channels_list = [
# in_channels_stage2,
# in_channels_stage2 * 2,
# in_channels_stage2 * 4,
# in_channels_stage2 * 8,
# ]
# out_channel = 256
# return BackboneWithFPN(backbone, return_layers,
# in_channels_list, out_channel)
def resnet_fpn_backbone(backbone_name, pretrained, use_attn=0):
backbone = resnet.__dict__[backbone_name](
        pretrained=pretrained  # honor the caller's flag instead of hardcoding True
)
for name, param in backbone.named_parameters():
if "layer2" not in name and "layer3" not in name and "layer4" not in name:
param.requires_grad_(False)
return_layers = {"layer1": "0", "layer2": "1",
"layer3": "2", "layer4": "3"}
in_channels_stage2 = backbone.inplanes // 8
in_channels_list = [
in_channels_stage2,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
]
out_channel = 256
if use_attn == 1:
return BackboneWithAFPN(backbone, return_layers,
in_channels_list, out_channel)
else:
return BackboneWithFPN(backbone, return_layers,
in_channels_list, out_channel)
def densenet_fpn_backbone(backbone_name, pretrained):
backbone = densenet.__dict__[backbone_name](
pretrained=pretrained
)
for name, param in backbone.features.named_parameters():
if "denseblock" not in name and "transition" not in name:
param.requires_grad_(False)
# in_channels_list = [128, 256, 896, 1920]
in_channels_list = {
'densenet121': [128, 256, 512, 1024],
'densenet161': [192, 384, 1056, 2208],
'densenet169': [128, 256, 896, 1920],
}
in_channels_list = in_channels_list[backbone_name]
out_channel = 256
return BackboneWithFPNForDensenet(backbone,
in_channels_list,
out_channel)
if __name__ == "__main__":
import torch
x = torch.randn(1, 3, 224, 224)
net = resnet_fpn_backbone("resnet50", True)
# net = densenet_fpn_backbone("densenet161", True)
out = net(x)
import ipdb;ipdb.set_trace()
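    # (sketch) instead of dropping into the debugger above, the feature maps
    # can be inspected directly; assuming the custom FPN mirrors torchvision's
    # and returns an OrderedDict mapping level name -> tensor:
    # for level, feature in out.items():
    #     print(level, feature.shape)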
| [
"[email protected]"
] | |
4d4a74d4003bc1e926a3fae3b93b30311cdc107c | 0244e54a42231399e84b40ca8b5b70fb75f92e13 | /crm/views.py | 2f75f50e52fda92722603394dab4375ecf555591 | [] | no_license | wkoonings/maverickfoodservicewillem | 43371cd367d4b8d0d483aa4e70809cedeea2a431 | f5605c4714a7a6d9cf937d9ee076e41f912c65e4 | refs/heads/master | 2022-11-30T02:22:02.485874 | 2019-11-17T03:06:08 | 2019-11-17T03:06:08 | 212,730,718 | 0 | 0 | null | 2022-11-22T02:40:42 | 2019-10-04T03:44:43 | HTML | UTF-8 | Python | false | false | 7,429 | py | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect
from .models import *
from .forms import *
from django.db.models import Sum
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .serializers import CustomerSerializer
now = timezone.now()
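# NOTE: evaluated once at import time; the views below call timezone.now() directly.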
def home(request):
return render(request, 'crm/home.html', {'crm': home})
@login_required()
def customer_delete(request, pk):
customer = get_object_or_404(Customer, pk=pk)
customer.delete()
return redirect('crm:customer_list')
@login_required()
def customer_list(request):
customer = Customer.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/customer_list.html', {'customers': customer})
@login_required()
def customer_edit(request, pk):
customer = get_object_or_404(Customer, pk=pk)
if request.method == "POST":
# update
form = CustomerForm(request.POST, instance=customer)
if form.is_valid():
customer = form.save(commit=False)
customer.updated_date = timezone.now()
customer.save()
customer = Customer.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/customer_list.html',
{'customers': customer})
else:
# edit
form = CustomerForm(instance=customer)
return render(request, 'crm/customer_edit.html', {'form': form})
@login_required()
def service_list(request):
service = Service.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/service_list.html', {'services': service})
@login_required()
def service_new(request):
if request.method == "POST":
form = ServiceForm(request.POST)
if form.is_valid():
service = form.save(commit=False)
            service.created_date = timezone.now()
service.save()
services = Service.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/service_list.html', {'services': services})
else:
form = ServiceForm()
return render(request, 'crm/service_new.html', {'form': form})
@login_required()
def service_edit(request, pk):
service = get_object_or_404(Service, pk=pk)
if request.method == "POST":
form = ServiceForm(request.POST, instance=service)
if form.is_valid():
service = form.save(commit=False)
service.updated_date = timezone.now()
service.save()
service = Service.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/service_list.html', {'services': service})
else:
form = ServiceForm(instance=service)
return render(request, 'crm/service_edit.html', {'form': form})
@login_required()
def service_delete(request, pk):
service = get_object_or_404(Service, pk=pk)
service.delete()
return redirect('crm:service_list')
@login_required()
def product_list(request):
product = Product.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/product_list.html', {'products': product})
@login_required()
def product_new(request):
if request.method == "POST":
form = ProductForm(request.POST)
if form.is_valid():
product = form.save(commit=False)
            product.created_date = timezone.now()
product.save()
products = Product.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/product_list.html', {'products': products})
else:
form = ProductForm()
return render(request, 'crm/product_new.html', {'form': form})
@login_required()
def product_delete(request, pk):
product = get_object_or_404(Product, pk=pk)
product.delete()
return redirect('crm:product_list')
@login_required()
def product_edit(request, pk):
product = get_object_or_404(Product, pk=pk)
if request.method == "POST":
form = ProductForm(request.POST, instance=product)
if form.is_valid():
product = form.save(commit=False)
product.updated_date = timezone.now()
product.save()
products = Product.objects.filter(created_date__lte=timezone.now())
return render(request, 'crm/product_list.html', {'products': products})
else:
form = ProductForm(instance=product)
return render(request, 'crm/product_edit.html', {'form': form})
@login_required()
def summary(request, pk):
customer = get_object_or_404(Customer, pk=pk)
customers = Customer.objects.filter(created_date__lte=timezone.now())
services = Service.objects.filter(cust_name=pk)
products = Product.objects.filter(cust_name=pk)
sum_service_charge = Service.objects.filter(cust_name=pk).aggregate(Sum('service_charge'))
sum_product_charge = Product.objects.filter(cust_name=pk).aggregate(Sum('charge'))
return render(request, 'crm/summary.html', {'customers': customers,
'products': products,
'services': services,
'sum_service_charge': sum_service_charge,
'sum_product_charge': sum_product_charge})
@login_required()
def dashboard(request):
return render(request,
'crm/dashboard.html',
{'section': 'dashboard'})
def register(request):
if request.method == 'POST':
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new user object but avoid saving it yet
new_user = user_form.save(commit=False)
# Set the chosen password
new_user.set_password(
user_form.cleaned_data['password'])
# Save the User object
new_user.save()
Profile.objects.create(user=new_user)
return render(request,
'crm/register_done.html',
{'new_user': new_user})
else:
user_form = UserRegistrationForm()
return render(request,
'crm/register.html',
{'user_form': user_form})
@login_required
def edit(request):
if request.method == 'POST':
user_form = UserEditForm(instance=request.user,
data=request.POST)
profile_form = ProfileEditForm(
instance=request.user.profile,
data=request.POST,
files=request.FILES)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
else:
user_form = UserEditForm(instance=request.user)
profile_form = ProfileEditForm(
instance=request.user.profile)
return render(request,
'crm/edit.html',
{'user_form': user_form,
'profile_form': profile_form})
class CustomerList(APIView):
def get(self, request):
customers_json = Customer.objects.all()
serializer = CustomerSerializer(customers_json, many=True)
return Response(serializer.data) | [
"[email protected]"
] | |
f464d9bcbc96f968b327af9d7673c249df0ed42b | 70fa997255d87e343a1bf21167ac7b1a5d52fa5b | /pizza_project/pizza/pizza/urls.py | 8390cb3ffc1d22fde98aa66e417a734bf05c3365 | [] | no_license | nikivay1/pizza | e20ae6df362096d70ce894b71c79cab4f865a797 | c7f241ab54886597b0daf4147f862166e57f8fc8 | refs/heads/master | 2022-12-21T07:11:58.684603 | 2020-09-29T06:32:12 | 2020-09-29T06:32:12 | 299,525,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,690 | py | from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views.i18n import javascript_catalog
handler404 = 'core.views.page_404'
handler500 = 'core.views.page_500'
js_info_dict = {
'packages': 'core+catalog+users+cart',
'domain': 'django'
}
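# Extra kwargs for the legacy javascript_catalog view below; it accepts a
# '+'-separated string of app packages like this one.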
urlpatterns = [
url(r'^sitemap\.htm$',
TemplateView.as_view(template_name='sitemap.htm'),
name='sitemap-htm'),
url(r'^robots\.txt$', TemplateView.as_view(template_name='robots.txt',
content_type='text/plain'),
name='robots'),
url(r'^sitemap\.xml$', TemplateView.as_view(template_name='sitemap.xml',
content_type='text/xml'),
name='sitemap'),
url(r'^admin/rosetta', include('rosetta.urls')),
url(r'^jet/', include('jet.urls', 'jet')),
url(r'^admin/', admin.site.urls),
url(r'^api/catalog/', include('catalog.urls', namespace='catalog-api')),
url(r'^api/users/', include('users.urls', namespace='users-api')),
url(r'^api/cart/', include('cart.urls', namespace='cart-api')),
url(r'^api/core/', include('core.urls.api', namespace='core-api')),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^jsi18n/$', javascript_catalog, js_info_dict, name='javascript-catalog'),
url(r'^', include('core.urls'))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"[email protected]"
] | |
3782ab695f705b3ffac765f0c694caf4de5d1a8a | c88e5659ddb88ef6bcc52045ec48eb54458a351c | NewWordExtractor/libs/Nlp.py | 480228ec8953ca5a973e8dac947eeb18793f1c50 | [] | no_license | yyfsXZ/machine_learning_projects | 0e71e9ddc6558bdec90c829bc463c9e56433ba1c | 9c5b46f87afc6a0b7597c9f5feb9cc797b84e5a6 | refs/heads/master | 2020-05-29T21:47:16.442665 | 2019-06-18T07:17:05 | 2019-06-18T07:17:05 | 189,392,798 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | #!/usr/bin/env python
#coding=utf-8
import jieba
class Nlp(object):
def __init__(self):
pass
def wordseg(self, query):
return [word.encode("utf-8") for word in jieba.cut(query)] | [
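# Usage sketch (assumes jieba is installed; under Python 2, which this module
# targets, the returned tokens are UTF-8 byte strings):
#   nlp = Nlp()
#   print(nlp.wordseg(u"我爱自然语言处理"))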
"[email protected]"
] | |
0d74cc1c1d6b0e6f49e96b743d49334c1f550195 | a6a539d0f8fde24434f90b3dedad6b1144066145 | /djangotest/account/views.py | 87fc1f36dd6b5f4f5eb1be1e6d3368ba879d6031 | [] | no_license | linneudm/djangotest | 5ddd1db2430e5119fe544b343bc81ee9e20cf15d | cb73259aa3e7a99b0cacbfd0d863ead87704cb65 | refs/heads/master | 2022-12-10T12:33:32.475341 | 2019-10-29T11:24:16 | 2019-10-29T11:24:16 | 176,003,475 | 0 | 1 | null | 2022-12-08T04:52:34 | 2019-03-16T17:33:53 | JavaScript | UTF-8 | Python | false | false | 2,405 | py | '''
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy, reverse
from django.views.generic import ListView
from django.views.generic.edit import CreateView, UpdateView
from django.contrib.auth import get_user_model
from djangotest.account.forms import UserForm, UserUpdateForm
@login_required
@permission_required('auth.delete_user', raise_exception=True)
def user_active_or_disable(request, pk):
user = get_user_model().objects.get(pk=pk)
    if user.pk != request.user.pk:
status = not user.is_active
user.is_active = status
user.save()
        messages.success(request, 'User {} was successfully {}.'.format(user.username, 'activated' if status else 'deactivated'))
else:
        messages.error(request, 'You cannot deactivate your own user account.')
return HttpResponseRedirect(reverse('account:user-list'))
class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView):
permission_required = 'auth.view_user'
raise_exception = True
model = get_user_model()
template_name = 'account/user/list.html'
context_object_name = 'users'
class UserCreateView(LoginRequiredMixin, PermissionRequiredMixin, SuccessMessageMixin, CreateView):
permission_required = ('auth.add_user', 'auth.view_user')
raise_exception = True
model = get_user_model()
form_class = UserForm
template_name = 'account/user/form.html'
success_url = reverse_lazy('account:user-list')
success_message = "O usuário %(username)s foi criado com sucesso."
def form_valid(self, form):
form.instance.set_password(form.instance.password)
return super(UserCreateView, self).form_valid(form)
class UserUpdateView(LoginRequiredMixin, PermissionRequiredMixin, SuccessMessageMixin, UpdateView):
permission_required = ('auth.change_user', 'auth.view_user')
raise_exception = True
model = get_user_model()
form_class = UserUpdateForm
template_name = 'account/user/form.html'
success_url = reverse_lazy('account:user-list')
success_message = "O usuário %(username)s foi atualizado com sucesso."
''' | [
"[email protected]"
] | |
30acd6fabbb86e2029fe9bdb373bcb1912239b99 | 7b4820948845f55274b211d676ab8a6253a6298b | /addons/plugin.video.phstreams/default.py | 165d263833b1730268ad874343597e83e0a9e838 | [] | no_license | bopopescu/mw | 524c57d4b859751e298b907a12e44e9711ef72a6 | 5ef2acea0fb4150578e53201463c6bc5da37be20 | refs/heads/master | 2021-05-30T19:33:11.750160 | 2016-01-11T05:28:46 | 2016-01-11T05:28:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,159 | py | # -*- coding: utf-8 -*-
'''
Phoenix Add-on
Copyright (C) 2015 Blazetamer
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urlparse,sys
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))
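# sys.argv[2] carries the plugin's query string (e.g. '?action=dmode&url=...');
# strip the leading '?' and parse it into a parameter dict.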
action = params.get('action')
name = params.get('name', '0')
url = params.get('url', '0')
playable = params.get('playable', '0')
content = params.get('content', '0')
tvshow = params.get('tvshow', '0')
audio = params.get('audio', '0')
image = params.get('image', '0')
fanart = params.get('fanart', '0')
if action is None:
from resources.lib.indexers import phstreams
phstreams.getCategory()
elif action == 'dmode' or action == 'ndmode':
from resources.lib.indexers import phstreams
phstreams.getDirectory(name, url, audio, image, fanart, playable, content)
elif action == 'subDirectory':
from resources.lib.indexers import phstreams
phstreams.subDirectory(name, url, audio, image, fanart, playable, tvshow, content)
elif action == 'localDirectory':
from resources.lib.indexers import phstreams
phstreams.localDirectory()
elif action == 'search':
from resources.lib.indexers import phstreams
phstreams.getSearch()
elif action == 'searchDirectory':
from resources.lib.indexers import phstreams
phstreams.searchDirectory()
elif action == 'searchDirectory2':
from resources.lib.indexers import phstreams
phstreams.searchDirectory(url)
elif action == 'clearSearch':
from resources.lib.indexers import phstreams
phstreams.clearSearch()
elif action == 'resolveUrl':
from resources.lib.indexers import phstreams
phstreams.resolveUrl(name, url, audio, image, fanart, playable, content)
elif action == 'openDialog':
from resources.lib.libraries import phdialogs
phdialogs.openDialog(url,audio)
elif action == 'openSettings':
from resources.lib.libraries import control
control.openSettings()
elif action == 'addView':
from resources.lib.libraries import views
views.addView(content)
elif action == 'downloader':
from resources.lib.libraries import downloader
downloader.downloader()
elif action == 'addDownload':
from resources.lib.libraries import downloader
downloader.addDownload(name,url,image)
elif action == 'removeDownload':
from resources.lib.libraries import downloader
downloader.removeDownload(url)
elif action == 'startDownload':
from resources.lib.libraries import downloader
downloader.startDownload()
elif action == 'startDownloadThread':
from resources.lib.libraries import downloader
downloader.startDownloadThread()
elif action == 'stopDownload':
from resources.lib.libraries import downloader
downloader.stopDownload()
elif action == 'statusDownload':
from resources.lib.libraries import downloader
downloader.statusDownload()
elif action == 'trailer':
from resources.lib.libraries import trailer
trailer.trailer().play(name)
elif action == 'clearCache':
from resources.lib.libraries import cache
cache.clear()
elif action == 'radioDirectory':
from resources.lib.indexers import phradios
phradios.radioDirectory()
elif action == 'radioResolve':
from resources.lib.indexers import phradios
phradios.radioResolve(name, url, image)
elif action == 'radio1fm':
from resources.lib.indexers import phradios
phradios.radio1fm(image, fanart)
elif action == 'radio181fm':
from resources.lib.indexers import phradios
phradios.radio181fm(image, fanart)
elif action == 'radiotunes':
from resources.lib.indexers import phradios
phradios.radiotunes(image, fanart)
elif action == 'Kickinradio':
from resources.lib.indexers import phradios
phradios.Kickinradio(image, fanart)
elif action == 'Kickinradiocats':
from resources.lib.indexers import phradios
phradios.Kickinradiocats(url, image, fanart)
elif action == 'CartoonDirectory':
from resources.lib.indexers import phtoons
phtoons.CartoonDirectory()
elif action == 'CartoonCrazy':
from resources.lib.indexers import phtoons
phtoons.CartoonCrazy(image, fanart)
elif action == 'CCsearch':
from resources.lib.indexers import phtoons
phtoons.CCsearch(url, image, fanart)
elif action == 'CCcat':
from resources.lib.indexers import phtoons
phtoons.CCcat(url, image, fanart)
elif action == 'CCpart':
from resources.lib.indexers import phtoons
phtoons.CCpart(url, image, fanart)
elif action == 'CCstream':
from resources.lib.indexers import phtoons
phtoons.CCstream(url)
elif action == 'nhlDirectory':
from resources.lib.indexers import nhlcom
nhlcom.nhlDirectory()
elif action == 'nhlScoreboard':
from resources.lib.indexers import nhlcom
nhlcom.nhlScoreboard()
elif action == 'nhlArchives':
from resources.lib.indexers import nhlcom
nhlcom.nhlArchives()
elif action == 'nhlStreams':
from resources.lib.indexers import nhlcom
nhlcom.nhlStreams(name,url)
elif action == 'nhlResolve':
from resources.lib.indexers import nhlcom
nhlcom.nhlResolve(url)
| [
"[email protected]"
] | |
0b0a32e3829836ea18e677b11ff16c958c14a807 | f28f038e641c0c4c446dd9b5c4b9baad9787be26 | /python assignments/Assignment2/19075043/20.py | aa888a2bcfc0784fe5fc512a57928ba7768f6835 | [] | no_license | king-11/Information-Technology-Workshop | 0122b9f155efa35f4a1415307c6056e7e6716c3b | 7890c3308f5cda8004b6f1edc89d4e4dc0f81d42 | refs/heads/master | 2023-06-02T03:20:18.337247 | 2021-06-20T17:56:11 | 2021-06-20T17:56:11 | 275,173,232 | 2 | 11 | null | 2021-06-20T17:56:12 | 2020-06-26T14:19:15 | Python | UTF-8 | Python | false | false | 120 | py | import sys
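# Print sys.getsizeof() for each whitespace-separated token the user enters.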
x = input("Enter objects ").split()
for a in x:
print(f"Memory size of \'{a}\' is {sys.getsizeof(a)}")
| [
"[email protected]"
] | |
1c207a0144c0ae25da72a066a7c9bc0f7036d41c | fef7cfae5c35e7248a7057a83f9763c686a6397c | /exercises/6 - Challenges/A - Guess the number.py | 135ad32e8270374cd50b9bed72451becb3e642fc | [] | no_license | beajmnz/IEDSbootcamp | 41710965712d7650d101cbe910ea061b1cbb4c2b | 0b03a7c54711df469025602847c0f2be8541d234 | refs/heads/main | 2023-06-06T03:16:25.690091 | 2021-06-25T10:41:39 | 2021-06-25T10:41:39 | 360,922,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | #! /usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 17:16:20 2021
@author: Bea Jimenez <[email protected]>
"""
"""
Guess the number
================
The player is asked to guess a number between 1 and 100, picked randomly by the program.
Each time they propose an answer, the program tells them if their answer is correct, too low, or too high.
The player has an unlimited number of attempts (or, in a variant, a limited number of attempts).
"""
import random
nr2Bguessed = random.randint(1, 100)
attempt = int(
input(
"I have thought of a number between 1 and 100. Try and guess it!\nGive a number\n"
)
)
while attempt != nr2Bguessed:
if attempt < nr2Bguessed:
attempt = int(input("Too low. Guess again\n"))
else:
attempt = int(input("Too high. Guess again\n"))
print("You did it!")
| [
"[email protected]"
] | |
847cf96f3f1e798f655128cfef4a1e6b7c30401a | 1e86e0b27357366182f0c1c0113c8c667990c2f5 | /2739.py | 4548ff89619a222ace7d3ff49c1f5e611780d277 | [
"MIT"
] | permissive | kwondohun0308/Beakjoon | 2a9302ae166162a2fd8d0aa41fd6acf209b5a2bb | 65e8c1015bcb3bb757d8056525034ee333ecb681 | refs/heads/main | 2023-08-16T03:48:28.432954 | 2021-10-14T12:30:27 | 2021-10-14T12:30:27 | 384,434,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | N=input()
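# Baekjoon 2739: print the multiplication table of N (N*1 through N*9).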
N = int(N)
for i in range(1, 10):
    print(N, '*', i, '=', N * i)
| [
"[email protected]"
] | |
718e436c09f196413b130c80af8fa649bc506d25 | 3f225584602cc7fd2610911dc728a4d40a33348c | /convolutional-pose-machines-tensorflow/run_training.py | d1ac8f053626a7acd58478a37ee0a0e30e297274 | [
"Apache-2.0"
] | permissive | cccvt/HandPoseEstimation | 2f5e6b6e27be92de98a5a77bf060a723daa89d80 | 13745c572d87892a8ce332ff0dd7309ab0603b99 | refs/heads/master | 2020-03-26T19:22:18.058680 | 2018-06-05T06:07:09 | 2018-06-05T06:07:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,728 | py | import tensorflow as tf
import numpy as np
import cv2
import os
import importlib
import time
from utils import cpm_utils
from config import FLAGS
import Ensemble_data_generator
cpm_model = importlib.import_module('models.nets.' + FLAGS.network_def)
def main(argv):
"""
:param argv:
:return:
"""
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
""" Create dirs for saving models and logs
"""
model_path_suffix = os.path.join(FLAGS.network_def,
'input_{}_output_{}'.format(FLAGS.input_size, FLAGS.heatmap_size),
'joints_{}'.format(FLAGS.num_of_joints),
'stages_{}'.format(FLAGS.cpm_stages),
'init_{}_rate_{}_step_{}'.format(FLAGS.init_lr, FLAGS.lr_decay_rate,
FLAGS.lr_decay_step)
)
model_save_dir = os.path.join('models',
'weights',
model_path_suffix)
train_log_save_dir = os.path.join('models',
'logs',
model_path_suffix,
'train')
test_log_save_dir = os.path.join('models',
'logs',
model_path_suffix,
'test')
os.system('mkdir -p {}'.format(model_save_dir))
os.system('mkdir -p {}'.format(train_log_save_dir))
os.system('mkdir -p {}'.format(test_log_save_dir))
""" Create data generator
"""
# g = Ensemble_data_generator.ensemble_data_generator(FLAGS.train_img_dir,
# FLAGS.bg_img_dir,
# FLAGS.batch_size, FLAGS.input_size, True, True,
# FLAGS.augmentation_config, FLAGS.hnm, FLAGS.do_cropping)
# g_eval = Ensemble_data_generator.ensemble_data_generator(FLAGS.val_img_dir,
# FLAGS.bg_img_dir,
# FLAGS.batch_size, FLAGS.input_size, True, True,
# FLAGS.augmentation_config, FLAGS.hnm, FLAGS.do_cropping)
g = Ensemble_data_generator.ensemble_data_generator("train.tfrecords", 5, 256, 64)
g_eval = Ensemble_data_generator.ensemble_data_generator("test.tfrecords", 5, 256, 64)
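    # Positional args (assumed from usage, matching FLAGS above): TFRecord path,
    # batch size, input resolution, heatmap resolution.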
""" Build network graph
"""
model = cpm_model.CPM_Model(input_size=FLAGS.input_size,
heatmap_size=FLAGS.heatmap_size,
stages=FLAGS.cpm_stages,
joints=FLAGS.num_of_joints,
img_type=FLAGS.color_channel,
is_training=True)
model.build_loss(FLAGS.init_lr, FLAGS.lr_decay_rate, FLAGS.lr_decay_step, optimizer='RMSProp')
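    # RMSProp with a step-decayed learning rate driven by FLAGS.init_lr,
    # FLAGS.lr_decay_rate and FLAGS.lr_decay_step.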
print('=====Model Build=====\n')
merged_summary = tf.summary.merge_all()
""" Training
"""
device_count = {'GPU': 1} if FLAGS.use_gpu else {'GPU': 0}
with tf.Session(config=tf.ConfigProto(device_count=device_count,
allow_soft_placement=True)) as sess:
# Create tensorboard
train_writer = tf.summary.FileWriter(train_log_save_dir, sess.graph)
test_writer = tf.summary.FileWriter(test_log_save_dir, sess.graph)
# Create model saver
saver = tf.train.Saver(max_to_keep=None)
# Init all vars
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Restore pretrained weights
if FLAGS.pretrained_model != '':
if FLAGS.pretrained_model.endswith('.pkl'):
model.load_weights_from_file(FLAGS.pretrained_model, sess, finetune=True)
# Check weights
for variable in tf.trainable_variables():
with tf.variable_scope('', reuse=True):
var = tf.get_variable(variable.name.split(':0')[0])
print(variable.name, np.mean(sess.run(var)))
else:
saver.restore(sess, os.path.join(model_save_dir, FLAGS.pretrained_model))
# check weights
for variable in tf.trainable_variables():
with tf.variable_scope('', reuse=True):
var = tf.get_variable(variable.name.split(':0')[0])
print(variable.name, np.mean(sess.run(var)))
#saver.restore(sess, 'models/weights/cpm_hand')
for training_itr in range(FLAGS.training_iters):
t1 = time.time()
# Read one batch data
batch_x_np, batch_joints_np = g.next()
batch_x_np, batch_joints_np = sess.run([batch_x_np, batch_joints_np])
if FLAGS.normalize_img:
# Normalize images
batch_x_np = batch_x_np / 255.0 - 0.5
else:
batch_x_np -= 128.0
# Generate heatmaps from joints
batch_gt_heatmap_np = cpm_utils.make_heatmaps_from_joints(FLAGS.input_size,
FLAGS.heatmap_size,
FLAGS.joint_gaussian_variance,
batch_joints_np)
# Forward and update weights
stage_losses_np, total_loss_np, _, summaries, current_lr, \
stage_heatmap_np, global_step = sess.run([model.stage_loss,
model.total_loss,
model.train_op,
merged_summary,
model.cur_lr,
model.stage_heatmap,
model.global_step
],
feed_dict={model.input_images: batch_x_np,
model.gt_hmap_placeholder: batch_gt_heatmap_np})
# Show training info
print_current_training_stats(global_step, current_lr, stage_losses_np, total_loss_np, time.time() - t1)
# Write logs
train_writer.add_summary(summaries, global_step)
# TODO: each validation data set, do prediction
# Draw intermediate results
if (global_step + 1) % 100 == 0:
if FLAGS.color_channel == 'GRAY':
demo_img = np.repeat(batch_x_np[0], 3, axis=2)
                    if FLAGS.normalize_img:
                        demo_img += 0.5
                    else:
                        demo_img = (demo_img + 128.0) / 255.0
                elif FLAGS.color_channel == 'RGB':
                    if FLAGS.normalize_img:
                        demo_img = batch_x_np[0] + 0.5
                    else:
                        demo_img = (batch_x_np[0] + 128.0) / 255.0
else:
                    raise ValueError('Unsupported image type.')
demo_stage_heatmaps = []
for stage in range(FLAGS.cpm_stages):
demo_stage_heatmap = stage_heatmap_np[stage][0, :, :, 0:FLAGS.num_of_joints].reshape(
(FLAGS.heatmap_size, FLAGS.heatmap_size, FLAGS.num_of_joints))
demo_stage_heatmap = cv2.resize(demo_stage_heatmap, (FLAGS.input_size, FLAGS.input_size))
demo_stage_heatmap = np.amax(demo_stage_heatmap, axis=2)
demo_stage_heatmap = np.reshape(demo_stage_heatmap, (FLAGS.input_size, FLAGS.input_size, 1))
demo_stage_heatmap = np.repeat(demo_stage_heatmap, 3, axis=2)
demo_stage_heatmaps.append(demo_stage_heatmap)
demo_gt_heatmap = batch_gt_heatmap_np[0, :, :, 0:FLAGS.num_of_joints].reshape(
(FLAGS.heatmap_size, FLAGS.heatmap_size, FLAGS.num_of_joints))
demo_gt_heatmap = cv2.resize(demo_gt_heatmap, (FLAGS.input_size, FLAGS.input_size))
demo_gt_heatmap = np.amax(demo_gt_heatmap, axis=2)
demo_gt_heatmap = np.reshape(demo_gt_heatmap, (FLAGS.input_size, FLAGS.input_size, 1))
demo_gt_heatmap = np.repeat(demo_gt_heatmap, 3, axis=2)
if FLAGS.cpm_stages > 4:
upper_img = np.concatenate((demo_stage_heatmaps[0], demo_stage_heatmaps[1], demo_stage_heatmaps[2]),
axis=1)
                    # demo_img is already scaled to [0, 1] in both branches above
                    blend_img = 0.5 * demo_img + 0.5 * demo_gt_heatmap
lower_img = np.concatenate((demo_stage_heatmaps[FLAGS.cpm_stages - 1], demo_gt_heatmap, blend_img),
axis=1)
demo_img = np.concatenate((upper_img, lower_img), axis=0)
#cv2.imshow('current heatmap', (demo_img * 255).astype(np.uint8))
#cv2.waitKey(1000)
cv2.imwrite("/home/qiaohe/convolutional-pose-machines-tensorflow/validation_img/" + str(global_step) + ".jpg", demo_img * 255)
else:
upper_img = np.concatenate((demo_stage_heatmaps[FLAGS.cpm_stages - 1], demo_gt_heatmap, demo_img),
axis=1)
#cv2.imshow('current heatmap', (upper_img * 255).astype(np.uint8))
#cv2.waitKey(1000)
cv2.imwrite("/home/qiaohe/convolutional-pose-machines-tensorflow/validation_img/" + str(global_step) + ".jpg", upper_img * 255)
if (global_step + 1) % FLAGS.validation_iters == 0:
mean_val_loss = 0
cnt = 0
while cnt < 10:
batch_x_np, batch_joints_np = g_eval.next()
batch_x_np, batch_joints_np = sess.run([batch_x_np, batch_joints_np])
# Normalize images
batch_x_np = batch_x_np / 255.0 - 0.5
batch_gt_heatmap_np = cpm_utils.make_heatmaps_from_joints(FLAGS.input_size,
FLAGS.heatmap_size,
FLAGS.joint_gaussian_variance,
batch_joints_np)
total_loss_np, summaries = sess.run([model.total_loss, merged_summary],
feed_dict={model.input_images: batch_x_np,
model.gt_hmap_placeholder: batch_gt_heatmap_np})
mean_val_loss += total_loss_np
cnt += 1
print('\nValidation loss: {:>7.2f}\n'.format(mean_val_loss / cnt))
test_writer.add_summary(summaries, global_step)
# Save models
if (global_step + 1) % FLAGS.model_save_iters == 0:
saver.save(sess=sess, save_path=model_save_dir + '/' + FLAGS.network_def.split('.py')[0],
global_step=(global_step + 1))
print('\nModel checkpoint saved...\n')
# Finish training
if global_step == FLAGS.training_iters:
break
print('Training done.')
def print_current_training_stats(global_step, cur_lr, stage_losses, total_loss, time_elapsed):
stats = 'Step: {}/{} ----- Cur_lr: {:1.7f} ----- Time: {:>2.2f} sec.'.format(global_step, FLAGS.training_iters,
cur_lr, time_elapsed)
losses = ' | '.join(
['S{} loss: {:>7.2f}'.format(stage_num + 1, stage_losses[stage_num]) for stage_num in range(FLAGS.cpm_stages)])
losses += ' | Total loss: {}'.format(total_loss)
print(stats)
print(losses + '\n')
if __name__ == '__main__':
tf.app.run()
| [
"[email protected]"
] |