blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 5-283) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-41) | license_type (stringclasses, 2 values) | repo_name (stringlengths 7-96) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 58 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 12.7k-662M, nullable) | star_events_count (int64, 0-35.5k) | fork_events_count (int64, 0-20.6k) | gha_license_id (stringclasses, 11 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 43 values) | src_encoding (stringclasses, 9 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 7-5.88M) | extension (stringclasses, 30 values) | content (stringlengths 7-5.88M) | authors (sequencelengths 1-1) | author (stringlengths 0-73)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d731a6e23ccd6f7645c4e6653560793e812b1aa1 | 81c96f00a92d8bf094e6234bc70e03df4dff0641 | /IntialTowerofHonai.py | bdcb1467d30d8bc73509fe8bac7ff8378e238ab2 | [] | no_license | rkankanala26/Python_Class | ce41a079d41703493c23548e8041ea013d6e8725 | 410291933f04e369f387a610c050fd1f982e4a79 | refs/heads/master | 2022-12-22T12:36:03.991818 | 2020-09-27T16:11:05 | 2020-09-27T16:11:05 | 273,601,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | # create Method
def drawPole():
    # Draw the vertical pole as four segments
    for x in range(1, 5, 1):
        print(" |")
def drawBase():
    # Draw the base the pole stands on
    print('----------')
def drawTitle(title):
    print(f" {title}")
def drawTower(title):
    drawPole()
    drawBase()
    drawTitle(title)
drawTower("Tower-A")
drawTower("Tower-B")
drawTower("Tower-C") | [
"[email protected]"
] | |
bb20d089871e928764cadb53ca4b96f3bbc36eb0 | 4f6c695d1d570130a40dfe8193a53f040dc6bd4b | /simpleApp/urls.py | 2d8965f56567904eb7bdc135b7824915215efe86 | [] | no_license | Midonika/SimpleApp | cc098d14fbd74339ccabf8cd35619aa41ebb3caf | 325555cff037a941a944e451129ccbbdd739175d | refs/heads/master | 2022-12-21T04:47:56.158094 | 2019-03-19T18:52:40 | 2019-03-19T18:52:40 | 176,401,765 | 0 | 0 | null | 2022-12-19T21:10:36 | 2019-03-19T01:51:42 | Python | UTF-8 | Python | false | false | 751 | py | """SimpleApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
57790d9d1660759cda52eb5ff93e7b321e6e4779 | 6c9c1ed5593316ca7bebe864eb308e16ca7843dd | /backend/dating/admin.py | ca4d973f149746d4b2d2b74ef709b78e86337430 | [] | no_license | crowdbotics-apps/test-name-27012 | d6d5ec07c0303492935be833520dae473fbef2fa | 4396befc206c8369ed4bf92ae2f209309afb5d67 | refs/heads/master | 2023-05-01T01:49:53.105821 | 2021-05-20T21:32:21 | 2021-05-20T21:32:21 | 369,342,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | from django.contrib import admin
from .models import Setting, Like, UserPhoto, Match, Dislike, Inbox, Profile
admin.site.register(Setting)
admin.site.register(Profile)
admin.site.register(UserPhoto)
admin.site.register(Match)
admin.site.register(Inbox)
admin.site.register(Dislike)
admin.site.register(Like)
# Register your models here.
| [
"[email protected]"
] | |
96e7faf6119b74fe1c16d036b92f79969eae1c30 | 061c36c4b33dd0c47d9d62c2057559d4c5973681 | /impala_tables_metadata.py | 50ff43fc2d4853b426d43eed02478e993d6efc04 | [
"MIT"
] | permissive | ashkankamyab/DevOps-Python-tools | 0847f9e1b74d7864d17b0a9833beeef1f149e5a5 | dc4b1ce2b2fbee3797b66501ba3918a900a79769 | refs/heads/master | 2022-10-09T15:23:31.108086 | 2022-09-01T14:32:56 | 2022-09-01T14:32:56 | 189,855,037 | 1 | 0 | NOASSERTION | 2019-06-02T14:15:18 | 2019-06-02T14:15:18 | null | UTF-8 | Python | false | false | 2,899 | py | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2019-11-26 10:08:52 +0000 (Tue, 26 Nov 2019)
#
# https://github.com/HariSekhon/DevOps-Python-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/HariSekhon
#
"""
Connect to an Impala daemon and print the first matching DDL metadata field (eg. 'Location')
for each table in each database, or only those matching given db / table regexes
Examples (fields are case sensitive regex and return N/A without match):
./impala_tables_metadata.py --field Location ...
./impala_tables_metadata.py --field SerDe ...
Caveats:
Hive is more reliable as Impala breaks on some table metadata definitions where Hive doesn't
Impala is faster than Hive for the first ~1000 tables but then slows down
so if you have a lot of tables I recommend you use the Hive version of this instead
eg. by ~1900 tables the Hive version will overtake the Impala version and
for thousands of tables Impala actually runs 1.5 - 2x slower than the Hive version overall
Tested on Impala 2.7.0, 2.12.0 on CDH 5.10, 5.16 with Kerberos and SSL
Due to a thrift / impyla bug this needs exactly thrift==0.9.3, see
https://github.com/cloudera/impyla/issues/286
If you get an error like this:
ERROR:impala.hiveserver2:Failed to open transport (tries_left=1)
...
TTransportException: TSocket read 0 bytes
then check your --kerberos and --ssl settings match the cluster's settings
(Thrift and Kerberos have the worst error messages ever)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
srcdir = os.path.abspath(os.path.dirname(__file__))
pylib = os.path.join(srcdir, 'pylib')
sys.path.append(pylib)
try:
# pylint: disable=wrong-import-position
from hive_tables_metadata import HiveTablesMetadata
except ImportError as _:
print('module import failed: %s' % _, file=sys.stderr)
print("Did you remember to build the project by running 'make'?", file=sys.stderr)
print("Alternatively perhaps you tried to copy this program out without it's adjacent libraries?", file=sys.stderr)
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.4.0'
class ImpalaTablesMetadata(HiveTablesMetadata):
def __init__(self):
# Python 2.x
super(ImpalaTablesMetadata, self).__init__()
# Python 3.x
# super().__init__()
# these are auto-set checking sys.argv[0] in HiveImpalaCLI class
self.name = 'Impala'
#self.default_port = 21050
#self.default_service_name = 'impala'
if __name__ == '__main__':
ImpalaTablesMetadata().main()
| [
"[email protected]"
] | |
3f0fd941c0800c2b2b894fd6b9e47ebbab48945e | 7db743838326f39497c766a0156647e99dab116e | /manage.py | a76f18702d441b16f6e901f62ef5f437cc78a0b8 | [] | no_license | mailchimp-mark/mm-mailchimp-code-sample | e721160b36b5b70627b2b4fed676686fdb03b90e | 553fe843d947eafd4b71d4c8ed51b65d4f9bb53c | refs/heads/master | 2020-03-26T20:43:37.988188 | 2018-08-19T22:44:44 | 2018-08-20T17:14:56 | 145,341,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pokemon_proj.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
1c5815de7339c8d47d410f4e24898c0b84c9975a | cd250e98a8c4d1d54dcdc301d30a1243f2b3fbaf | /orders/models.py | 6efab805254d1078ae8e7e566948e5e20188c546 | [] | no_license | Loreeh/webdevproj | 95d54d66518c2459e62acaefc6b377c847bafd04 | b08252b497a997beebad91caf6ab8adfa2a11440 | refs/heads/master | 2021-07-12T17:43:51.244988 | 2017-10-16T11:48:24 | 2017-10-16T11:48:24 | 107,119,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | from django.db import models
# Create your models here.
class Product(models.Model):
product_name = models.CharField(max_length=200)
product_details = models.TextField()
price = models.IntegerField()
    active = models.IntegerField(default=1)
product_manu = models.CharField(max_length=100)
category = models.CharField(max_length=20)
def __str__(self):
return '%s (%s tk)' % (self.product_name, self.price)
class Order (models.Model):
name = models.CharField(max_length=200)
phone = models.CharField(max_length=20)
address = models.TextField()
delivery_date = models.DateField(blank=True)
product_id = models.ForeignKey(Product)
payment_option = models.CharField(max_length=50)
order_status = models.CharField(max_length=50)
quantity = models.IntegerField()
| [
"[email protected]"
] | |
69bea90d36986d5bc09258b3f1e370efa7359471 | eb40dce4039d528b9cd06dbeda75da09d09d7fc5 | /need_install/Django-1.8.17/tests/model_formsets/models.py | e30c063bc9f480ec655ad816b6909f771a24965f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MulticsYin/MulticsSH | 39b62189446787c7f0f037b1640c9c780bd1dddd | 5837a0bff0e7da0e8535e4e0b31ef6baf24274b4 | refs/heads/master | 2021-08-28T07:53:51.759679 | 2017-12-11T15:31:03 | 2017-12-11T15:31:03 | 82,428,902 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,607 | py | from __future__ import unicode_literals
import datetime
import uuid
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class BetterAuthor(Author):
write_speed = models.IntegerField()
@python_2_unicode_compatible
class Book(models.Model):
author = models.ForeignKey(Author)
title = models.CharField(max_length=100)
class Meta:
unique_together = (
('author', 'title'),
)
ordering = ['id']
def __str__(self):
return self.title
def clean(self):
# Ensure author is always accessible in clean method
assert self.author.name is not None
@python_2_unicode_compatible
class BookWithCustomPK(models.Model):
my_pk = models.DecimalField(max_digits=5, decimal_places=0, primary_key=True)
author = models.ForeignKey(Author)
title = models.CharField(max_length=100)
def __str__(self):
return '%s: %s' % (self.my_pk, self.title)
class Editor(models.Model):
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class BookWithOptionalAltEditor(models.Model):
author = models.ForeignKey(Author)
# Optional secondary author
alt_editor = models.ForeignKey(Editor, blank=True, null=True)
title = models.CharField(max_length=100)
class Meta:
unique_together = (
('author', 'title', 'alt_editor'),
)
def __str__(self):
return self.title
@python_2_unicode_compatible
class AlternateBook(Book):
notes = models.CharField(max_length=100)
def __str__(self):
return '%s - %s' % (self.title, self.notes)
@python_2_unicode_compatible
class AuthorMeeting(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Author)
created = models.DateField(editable=False)
def __str__(self):
return self.name
class CustomPrimaryKey(models.Model):
my_pk = models.CharField(max_length=10, primary_key=True)
some_field = models.CharField(max_length=100)
# models for inheritance tests.
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
city = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Owner(models.Model):
auto_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
place = models.ForeignKey(Place)
def __str__(self):
return "%s at %s" % (self.name, self.place)
class Location(models.Model):
place = models.ForeignKey(Place, unique=True)
# this is purely for testing the data doesn't matter here :)
lat = models.CharField(max_length=100)
lon = models.CharField(max_length=100)
@python_2_unicode_compatible
class OwnerProfile(models.Model):
owner = models.OneToOneField(Owner, primary_key=True)
age = models.PositiveIntegerField()
def __str__(self):
return "%s is %d" % (self.owner.name, self.age)
@python_2_unicode_compatible
class Restaurant(Place):
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Product(models.Model):
slug = models.SlugField(unique=True)
def __str__(self):
return self.slug
@python_2_unicode_compatible
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
def __str__(self):
return "%s for %s" % (self.quantity, self.price)
class Meta:
unique_together = (('price', 'quantity'),)
class MexicanRestaurant(Restaurant):
serves_tacos = models.BooleanField(default=False)
class ClassyMexicanRestaurant(MexicanRestaurant):
restaurant = models.OneToOneField(MexicanRestaurant, parent_link=True, primary_key=True)
tacos_are_yummy = models.BooleanField(default=False)
# models for testing unique_together validation when a fk is involved and
# using inlineformset_factory.
@python_2_unicode_compatible
class Repository(models.Model):
name = models.CharField(max_length=25)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Revision(models.Model):
repository = models.ForeignKey(Repository)
revision = models.CharField(max_length=40)
class Meta:
unique_together = (("repository", "revision"),)
def __str__(self):
return "%s (%s)" % (self.revision, six.text_type(self.repository))
# models for testing callable defaults (see bug #7975). If you define a model
# with a callable default value, you cannot rely on the initial value in a
# form.
class Person(models.Model):
name = models.CharField(max_length=128)
class Membership(models.Model):
person = models.ForeignKey(Person)
date_joined = models.DateTimeField(default=datetime.datetime.now)
karma = models.IntegerField()
# models for testing a null=True fk to a parent
class Team(models.Model):
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class Player(models.Model):
team = models.ForeignKey(Team, null=True)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
# Models for testing custom ModelForm save methods in formsets and inline formsets
@python_2_unicode_compatible
class Poet(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Poem(models.Model):
poet = models.ForeignKey(Poet)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Post(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField()
def __str__(self):
return self.name
# Models for testing UUID primary keys
class UUIDPKParent(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
class UUIDPKChild(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
parent = models.ForeignKey(UUIDPKParent)
class ChildWithEditablePK(models.Model):
name = models.CharField(max_length=255, primary_key=True)
parent = models.ForeignKey(UUIDPKParent)
class AutoPKChildOfUUIDPKParent(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey(UUIDPKParent)
class AutoPKParent(models.Model):
name = models.CharField(max_length=255)
class UUIDPKChildOfAutoPKParent(models.Model):
uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
parent = models.ForeignKey(AutoPKParent)
class ParentWithUUIDAlternateKey(models.Model):
uuid = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=50)
class ChildRelatedViaAK(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey(to=ParentWithUUIDAlternateKey, to_field='uuid')
| [
"[email protected]"
] | |
762453d7afec32523b6ddfeb1ea7ef306ba9c1dc | bb2616a954e1cafe5875abd39a6af81cdeb019f4 | /bisectlib/bisect.py | 59888e4b292aef1a46b1e6bd44676e72019aa3d7 | [
"MIT"
] | permissive | llewelld/bisect | 1780a2b367b96de4485b52e3a3fadfd915fc3ee7 | cc918b5cc66dccdcef91257e5b0ea96076770db4 | refs/heads/master | 2023-01-06T21:41:29.376546 | 2022-12-25T14:11:11 | 2022-12-25T14:11:11 | 242,596,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,280 | py | import json
import math
from bisectlib.regfunc import RegFunc
################################################
# Weight classes
class Distance:
"""
Distance calculation base class
"""
start = 0
base = 0
def __init__(self):
pass
def compile_weights(self, summary, commits):
pass
def compile_sub_weights(self, start, base):
self.start = start
self.base = base
def weight(self, commit):
return 1
def distance(self, commit1, commit2):
distance = 0
if commit2 < commit1:
commit1, commit2 = commit2, commit1
pos = commit1
end = commit2
while pos < end:
distance += self.weight(pos)
pos += 1
return distance
class DistanceCached(Distance):
"""
Distance calculation base class with caching for reducing calculation time
"""
weights = []
distances = []
start = 0
base = 0
summary = {}
commits = {}
def compile_weights(self, summary, commits):
self.summary = summary
self.commits = commits
pass
def compile_sub_weights(self, start, base):
self.start = start
self.base = base
self.distances.clear()
pos = start
weight = 0
while pos <= base:
self.distances.append(weight)
weight += self.weight(pos)
pos += 1
def distance(self, commit1, commit2):
distance = 0
if commit2 < commit1:
commit1, commit2 = commit2, commit1
return self.distances[commit2 - self.start] - self.distances[commit1 - self.start]
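# Note (added for clarity): compile_sub_weights precomputes a prefix-sum table
# of per-commit weights, so distance(a, b) reduces to a single O(1) subtraction,
# equivalent to sum(weight(p) for p in range(a, b)).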
class DistanceCommits(DistanceCached):
"""
Distance calculation using number of commits as the distance metric
"""
pass
class DistanceLines(Distance):
"""
Distance calculation using number of lines changed as the distance metric
"""
summary = {}
commits = {}
def compile_weights(self, summary, commits):
self.summary = summary
self.commits = commits
pass
def weight(self, pos):
amount = (self.summary[self.commits[pos]]["lines_added"] + self.summary[self.commits[pos]]["lines_removed"])
if amount <= 0:
amount = 1
return amount
class DistanceBlocks(Distance):
"""
Distance calculation using number of changed hunks as the distance metric
"""
summary = {}
commits = {}
def compile_weights(self, summary, commits):
self.summary = summary
self.commits = commits
def weight(self, pos):
amount = self.summary[self.commits[pos]]["blocks_changed"]
if amount <= 0:
amount = 1
return amount
class DistanceFiles(Distance):
"""
Distance calculation using number of files changed as the distance metric
"""
summary = {}
commits = {}
def compile_weights(self, summary, commits):
self.summary = summary
self.commits = commits
def weight(self, pos):
amount = self.summary[self.commits[pos]]["files_changed"]
if amount <= 0:
amount = 1
return amount
class DistanceCommitsRegFunc(DistanceCached):
"""
Distance calculation using number of commits as the distance metric,
weighted using the function provided at initialisation
"""
regfunc = None
def __init__(self, regfunc):
self.regfunc = regfunc
def weight(self, commit):
x = (commit - self.start) / (self.base - self.start)
y = self.regfunc.apply(x)
if y < 1:
y = 1
return y
class DistanceLinesRegFunc(DistanceCached):
"""
Distance calculation using number of lines changed as the distance metric,
weighted using the function provided at initialisation
"""
regfunc = None
lines_changed = []
def __init__(self, regfunc):
self.regfunc = regfunc
def weight(self, commit):
x = self.lines_changed[commit - self.start] / self.lines_changed[self.base - self.start]
y = self.regfunc.apply(x)
if y < 1:
y = 1
return y
def compile_sub_weights(self, start, base):
self.start = start
self.base = base
self.distances.clear()
self.lines_changed.clear()
total_changed = 0
pos = start
while pos <= base:
amount = (self.summary[self.commits[pos]]["lines_added"] + self.summary[self.commits[pos]]["lines_removed"])
if amount <= 0:
amount = 1
total_changed += amount
self.lines_changed.append(total_changed)
pos += 1
pos = start
weight = 0
while pos <= base:
self.distances.append(weight)
weight += self.weight(pos)
pos += 1
class DistanceBlocksRegFunc(DistanceCached):
"""
Distance calculation using number of changed hunks as the distance metric,
weighted using the function provided at initialisation
"""
regfunc = None
blocks_changed = []
def __init__(self, regfunc):
self.regfunc = regfunc
def weight(self, commit):
x = self.blocks_changed[commit - self.start] / self.blocks_changed[self.base - self.start]
y = self.regfunc.apply(x)
if y < 1:
y = 1
return y
def compile_sub_weights(self, start, base):
self.start = start
self.base = base
self.distances.clear()
self.blocks_changed.clear()
total_changed = 0
pos = start
while pos <= base:
amount = (self.summary[self.commits[pos]]["blocks_changed"])
if amount <= 0:
amount = 1
total_changed += amount
self.blocks_changed.append(total_changed)
pos += 1
pos = start
weight = 0
while pos <= base:
self.distances.append(weight)
weight += self.weight(pos)
pos += 1
################################################
# Bisect algorithm
class Bisect:
"""
Perform the bisect algorithm on a set of collected data files using the
provided distance metric
"""
distanceFunction = None
def __init__(self, distanceFunction):
self.distanceFunction = distanceFunction
def interpolate(self, commit1, commit2, factor):
if commit2 < commit1:
commit1, commit2, factor = commit2, commit1, 1 - factor
interval = self.distanceFunction.distance(commit1, commit2)
offset = math.floor(interval * factor + 0.5)
travelled = 0
pos = commit1
while travelled < offset:
travelled += self.distanceFunction.weight(pos)
pos += 1
return pos
def bisect(self, start, base, target):
# target is bad
lowest = start # Bad
highest = base # Good
count = 0
while highest > lowest + 1: # Invariant
current = self.interpolate(lowest + 1, highest - 1, 0.5)
      if current >= highest:
        current = highest - 1
      elif current <= lowest:
        current = lowest + 1
if current <= target:
# lowest remains bad
lowest = current
else:
# highest remains good
highest = current
count += 1
return count
def load_data(self, file_in):
print('Loading data from {}'.format(file_in))
data = {'order': [], 'dict': {}}
try:
with open(file_in, 'r') as file_in:
data = json.load(file_in)
except Exception:
print('File {} could not be read'.format(file_in))
return data
def analyse(self, filein):
data = self.load_data(filein)
count = 0
total = 0
stats = []
if 'order' in data and 'dict' in data:
commits = data['order']
summary = data['dict']
print("Number of commits: {}".format(len(commits)))
reverts = []
for commit in commits:
if summary[commit] and 'reverts' in summary[commit]:
revert = summary[commit]['reverts']
base = summary[commit]['base']
testdata = dict()
if commit in summary and base in summary and revert in summary:
testdata["start"] = summary[commit]['position']
testdata["base"] = summary[base]['position']
testdata["target"] = summary[revert]['position']
if (testdata["start"] < testdata["target"]) and (testdata["target"] <= testdata["base"]):
reverts.append(testdata)
else:
print("Skipping revert due to inconsistent inequalities")
else:
print("Skipping due to missing data")
progress = 0
self.distanceFunction.compile_weights(summary, commits)
for testdata in reverts:
self.distanceFunction.compile_sub_weights(testdata["start"], testdata["base"])
steps = self.bisect(testdata["start"], testdata["base"], testdata["target"])
stat = {}
stat["distance"] = self.distanceFunction.distance(testdata["start"], testdata["base"])
stat["target"] = self.distanceFunction.distance(testdata["start"], testdata["target"])
stat["commits"] = testdata["base"] - testdata["start"]
stat["steps"] = steps
stats.append(stat)
total += steps
count += 1
progress += 1
if len(reverts) > 100 and progress % 100 == 0:
print("Progress {}%".format(round(100 * progress / len(reverts))))
else:
print("File contains no data")
return count, total, stats
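# --- Usage sketch (illustrative, not part of the original module; the file
# name "reverts.json" is a placeholder for a data file in the format produced
# by the project's collection step) ---
# bisector = Bisect(DistanceCommits())
# count, total, stats = bisector.analyse("reverts.json")
# if count:
#     print("mean bisect steps per revert: {:.2f}".format(total / count))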
| [
"[email protected]"
] | |
1005778d6a449087a6291c8200c7a2bc6d744ef1 | 96d16c014d934ef0df85c01cee3ec70dcf4fad43 | /Gerador_de_grafos.py | 79fce886fb2171a8ed080165027df29e4aac9282 | [] | no_license | educastro/Optimal_Communication_Spanning_Tree | 62459d61387d5bbccfd36a6b61c78f319ef929fc | 6a3613e58ecdccbe841a43116d45742ab9830fcd | refs/heads/main | 2023-08-08T17:15:51.961316 | 2021-09-11T12:05:04 | 2021-09-11T12:05:04 | 405,279,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | '''
Code developed as an implementation of the article "The Optimum Communication Spanning Tree problem and its different solutions" ("O problema da árvore de abrangência de comunicação ideal e suas diferentes soluções"), submitted to WPOS 2021.
All authors are affiliated with the Graduate Program in Applied Computing (Programa de Pós-Graduação em Computação Aplicada) at the Universidade de Brasília:
- Eduardo Castro
- Flávio Martins
- Leonardo Oliveira
- Ilo César
- Edison Ishikawa
The main purpose of the code below is graph generation.
'''
n=0
g={}
# Read the input containing the desired number of nodes
n=int(input())
# Loop that generates the graph
for x in range(n):
g[x]={}
for y in range(n):
if y!= x:
            # Set the weight of each edge between nodes
g[x][y] = 5
print('grafo=',g)
print('grafo_req =',g)
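# Example (added for illustration): for n = 3 the program prints
# grafo= {0: {1: 5, 2: 5}, 1: {0: 5, 2: 5}, 2: {0: 5, 1: 5}}
# grafo_req = {0: {1: 5, 2: 5}, 1: {0: 5, 2: 5}, 2: {0: 5, 1: 5}}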
'''
n=0
g={}
n=int(input())
for x in range(n-1):
g[x]={}
g[x][x+1] = 5
g[x+1]={}
g[x+1][0] = 5
print('grafo=',g)
print('grafo_req =',g)
'''
| [
"[email protected]"
] | |
32f35eeb02312d87e913896cf752226793b65daf | c3fb5e09330454322b622b2a758cae94dbfca386 | /nn/epinn3-1x.py | 3df5d66602e234142401e918fea33f97ff0f4682 | [] | no_license | senen2/epilepsia | b0cfb1dac07df160eaf780c02612f97d11f2efb7 | bdf756c43397fece04f25027c98a42d30385444b | refs/heads/master | 2021-01-18T22:23:15.349219 | 2016-12-04T18:01:16 | 2016-12-04T18:01:16 | 72,490,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | '''
Created on Oct 21, 2016
@author: botpi
'''
import numpy as np
import scipy.io
from apiepi import *
from apiepi import sigmoid
print "begin"
group = "resp_1-1"
resp = scipy.io.loadmat(group)
group = "train_1"
images, labels = read_images(group)
# print resp["W"], resp["b"]
# print resp["W"].shape, resp["b"].shape
x1 = sigmoid(images.dot(resp["W1"]) + resp["b1"])  # hidden-layer activations
pred = sigmoid(x1.dot(resp["W2"]) + resp["b2"])  # output-layer activations
#print pred, resp["b"]
print np.sum(np.argmax(pred, 1)), np.sum(np.argmax(labels, 1))
correct_prediction = np.equal(np.argmax(pred, 1), np.argmax(labels, 1))
print np.sum(correct_prediction), np.sum(correct_prediction) / float(labels.shape[0])
accuracy = np.mean(correct_prediction)
print accuracy
print 1128/1267.
print "end" | [
"[email protected]"
] | |
df11949dff8ce9df435455389573853a0327d934 | 78efa54b2b253f99ea7e073f783e6121c20cdb52 | /Codechef/Chef and Feedback.py | 9a29d4b277de8cde69b2bfbdd41880f1d3c4b005 | [] | no_license | NishchaySharma/Competitve-Programming | 32a93581ab17f05d20129471f7450f34ec68cc53 | 1ec44324d64c116098eb0beb74baac7f1c3395bb | refs/heads/master | 2020-04-08T04:02:46.599398 | 2020-01-01T15:51:39 | 2020-01-01T15:51:39 | 159,000,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | t=int(input())
for test in range(t):
s=input()
if '101' in s or '010' in s:
print('Good')
else:
print('Bad')
| [
"[email protected]"
] | |
11ad975086b70453ff1ac5a621d60080f6d30c2d | c444674bec908358292dd2f6474f8955bf2c048c | /Exercise-Sheet-09/system.py | 706a251709fde18cdfe525cf82103ef1c80c9c36 | [
"MIT"
] | permissive | mgarbellini/Computational-Physics-Material-Science | ab111743f59d1468e0df0b1f163f918dc20db66e | 08f456e94cc27dd2a9bf47ae9c793c0b7342b4bc | refs/heads/main | 2023-06-16T02:42:54.527465 | 2021-07-17T11:47:39 | 2021-07-17T11:47:39 | 362,033,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | #!/usr/bin/env python3
"""
@author: M. Garbellini
@email: [email protected]
* SYSTEM MODULE *
Contains the system variables
Latest update: June 2nd 2021
"""
import numpy as np
"""System variables"""
ensemble = None # type of ensemble (microcanonical, NVT, NHT)
dim = None # dimension of the system (2D or 3D)
alat = None # Lattice parameter
rho = None # Number density
p = None
L = None # Box dimensions (per edge)
"""Particles variables"""
mass = None
n = None # Number of particles for given axis n.shape = (3,)
N = None
pos = None
vel = None
"""Molecular system variables"""
mask = None # np array of dimension (M, 2), where mask[0,:] = [index[0], index[1]]
M = None # number of molecules
r0 = None
"""Force variables"""
force = None
f_wall_dw = None # force on lower wall
f_wall_up = None # force on upper wall
external_force = False
"""Energy and thermodynamics variables"""
energy = None
kinetic = None
potential = None
T = None
kt = None # Isothermal compressibility
cv = None # Heat capacity
"""Nose-Hoover Thermostat specific variables"""
Q = None # Thermal mass
lns = None # Lagrangian fictitious degree of freedom (log of s)
xi = None
G = None
nose_hoover = None # Energy contribution of the NH thermostat
"""Thermostat/Barostat variables"""
virial = None #Internal virial
pressure = None #Pressure of the system
| [
"[email protected]"
] | |
4a0132865252b185b84f72cb39a27e1abd0c2bf0 | 9f79c4f9a8a9154fc3dc9202ab8ed2547a722b5f | /PracticeExamMid/National court.py | 16010f552a3c11d598731928b528a6ee886ce44d | [] | no_license | grigor-stoyanov/PythonFundamentals | 31b6da00bd8294e8e802174dca4e62b231134090 | 5ae5f1f1b9ca9500d10e95318a731d3b29950a30 | refs/heads/main | 2023-02-11T12:17:19.010596 | 2021-01-14T22:14:54 | 2021-01-14T22:14:54 | 321,658,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | first_employee = int(input())
second_employee = int(input())
third_employee = int(input())
clients = int(input())
hours = 0
while clients > 0:
hours += 1
    if hours % 4 != 0:  # every fourth hour is a break
        clients_per_hour = first_employee + second_employee + third_employee
        clients -= clients_per_hour
print(f'Time needed: {hours}h.') | [
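# Worked example (added for illustration): with 1, 1 and 1 clients/hour per
# employee and 20 waiting clients, hours 4 and 8 are breaks, so the queue
# empties during hour 9 and the program prints "Time needed: 9h."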
"[email protected]"
] | |
cd3f6f8b649183f225de6e3ecf5a6435529a2fde | a8aa8ecebda6c3bad4a27854d29371312cb152f8 | /src/ggrc/utils/structures.py | 83478b4e5dd5ae179fde1ee430cdd21795a76b7e | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | xferra/ggrc-core | ef1f7016c717a391d927c128b2058e1fee6e2929 | b82333664db3978d85109f2d968239bd1260ee85 | refs/heads/develop | 2023-04-06T23:59:38.917995 | 2016-07-26T14:13:38 | 2016-07-26T14:13:38 | 64,231,198 | 1 | 1 | Apache-2.0 | 2023-04-03T23:37:20 | 2016-07-26T15:10:29 | Python | UTF-8 | Python | false | false | 3,177 | py | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Collection if ggrc specific structures."""
import collections
class CaseInsensitiveDict(collections.MutableMapping):
"""Case insensitive default dict implementation.
This is a modification of requests.structures.CaseInsensitiveDict so that it
works with all types of keys and that it can return a default value when a
non existing key is accessed.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __getitem__(self, key):
return self._store[self._key(key)][1]
def __setitem__(self, key, value):
"""Save the key value pair and remember the actual key."""
self._store[self._key(key)] = (key, value)
def __delitem__(self, key):
del self._store[self._key(key)]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def __eq__(self, other):
"""Check if the items in both dicts are the same.
Case is ignored in comparing keys but not when comparing values.
Args:
other: Case insensitive default dict that we want to compare to this one.
Returns:
True if all key value pairs match in both dicts where key comparison is
case insensitive.
"""
if isinstance(other, collections.Mapping):
      other = CaseInsensitiveDict(other)  # wrap plain mappings for comparison
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
@classmethod
def _key(cls, key):
return key.lower() if isinstance(key, basestring) else key
def lower_items(self):
"""Get items where all keys are lower case."""
return ((lowerkey, keyval[1]) for lowerkey, keyval in self._store.items())
def copy(self):
    return CaseInsensitiveDict(data=self._store.values())
class CaseInsensitiveDefaultDict(CaseInsensitiveDict):
"""Case insensitive default dict implementation.
This is a modification of requests.structures.CaseInsensitiveDict so that it
works with all types of keys and that it can return a default value when a
non existing key is accessed.
"""
def __init__(self, _default, data=None, **kwargs):
self._default = _default
super(CaseInsensitiveDefaultDict, self).__init__(data, **kwargs)
def __missing__(self, key):
"""Set a new missing value and return it."""
if self._default:
self._store[self._key(key)] = (key, self._default())
return self._store[self._key(key)][1]
else:
raise KeyError(key)
def __getitem__(self, key):
"""Get an item if it exists or return the default specified value."""
try:
return self._store[self._key(key)][1]
except KeyError:
return self.__missing__(key)
def __contains__(self, key):
return self._key(key) in self._store
def copy(self):
return CaseInsensitiveDefaultDict(self._default, data=self._store.values())
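# --- Usage sketch (illustrative, not part of the original module) ---
# counters = CaseInsensitiveDefaultDict(list)
# counters['Foo'].append(1)       # stored under 'foo', original key kept
# assert counters['FOO'] == [1]   # lookup is case-insensitive
# assert 'foo' in counters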
| [
"[email protected]"
] | |
70d68dd26c3e7a03a1b1caeb48cd6fb07a5b4e81 | 907a9071997e578e1b528262790f81c8b8b6ba98 | /widgets/run_remote_command_sink.py | 96bf59706c9804c3ca4b5d3f1448b1392d6104b2 | [] | no_license | aseering/pipeworks | 23fdc8b6b300d29e8d52c6ab7ad4526b490a8c94 | 31016845d667ce384ca39ad91c53c0577bc4f1f8 | refs/heads/master | 2021-01-13T15:16:50.810313 | 2016-12-22T05:15:40 | 2016-12-22T05:15:40 | 76,226,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,786 | py | from widget_types import DataPipe
from urlparse import urlparse, parse_qs
from threading import Thread
import paramiko
DEFAULT_BLOCK_SZ = 4 * 1024 * 1024
class RunRemoteCommandSink(DataPipe):
"""
Read data in from stdin
"""
@classmethod
def supportedProtocols(cls):
return {"exec-ssh"}
def __init__(self, uri):
self._uri = uri
@property
def stream(self):
uri = urlparse(self._uri)
params = parse_qs(uri.query, keep_blank_values=True)
cmd = uri.path
# exec-ssh://example.com/cat means "cat"
# exec-ssh://example.com/bin/echo means "/bin/echo"
if cmd.rfind("/") == 0:
cmd = cmd[1:]
client = paramiko.client.SSHClient()
client.load_system_host_keys()
if "insecure_accept_key" in params:
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(uri.hostname, uri.port, uri.username, uri.password)
stdin, stdout, stderr = client.exec_command(cmd)
# Feed stdin in a separate thread so we don't deadlock on ourselves.
# We could alternatively use 'select()', but not on Windows.
def feed_stdin():
for data in self._source.stream:
stdin.write(data)
stdin.close()
# Work around BUG:
# https://github.com/paramiko/paramiko/issues/322
stdin.channel.shutdown_write()
stdin_feeder_thread = Thread(target=feed_stdin)
stdin_feeder_thread.daemon = True
stdin_feeder_thread.start()
while True:
data = stdout.read(DEFAULT_BLOCK_SZ)
if not data:
return
yield data
| [
"[email protected]"
] | |
a7705095e841a93d923b059b889d953a870089af | 227c26f39fb898834af6b16912186a4ca9c6331d | /web/tests/test.py | 1c88121faee16bdef16d8199ab4f3877b87191f0 | [] | no_license | MarcosNBJ/CoronaNews | 14558bd4223a431921105c7560d21920dffa3595 | 3c3d0a108da55fad018e348bb2d29a4024507ccd | refs/heads/master | 2023-01-10T16:17:31.212797 | 2020-11-13T22:50:19 | 2020-11-15T07:47:37 | 257,811,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,724 | py | from app import app
import unittest
import re
class HomeEndpointTests(unittest.TestCase):
def setUp(self):
'''
Setting up response to be home-page's response
'''
CoronaNewsApp = app.test_client()
self.response = CoronaNewsApp.get('/')
def test_get(self):
'''
Testing response status code to see if request is successful
'''
self.assertEqual(200, self.response.status_code)
def test_content_type(self):
'''
Checking if the template loaded correctly
'''
self.assertIn('text/html', self.response.content_type)
def test_bootstrap_css(self):
'''
Making sure bootstrap loaded correctly
'''
response_str = self.response.data.decode('utf-8')
self.assertIn('bootstrap.min.css', response_str)
self.assertIn('bootstrap.min.js', response_str)
def test_content(self):
'''
Testing if the template was loaded correctly
'''
response_str = self.response.data.decode('utf-8')
self.assertIn(
            'Noticias mais recentes sobre o Coronavírus no Brasil', str(response_str))
class RegionEndpointTests(unittest.TestCase):
def setUp(self):
'''
Setting up response to be home-page's response
'''
CoronaNewsApp = app.test_client()
region = 'DF'
self.response = CoronaNewsApp.get(f'/{region}')
def test_content(self):
'''
        Testing if the template was loaded according to the region
'''
response_str = self.response.data.decode('utf-8')
self.assertIn(
'DF</button>', re.sub(r"[\n\t\s]*", "", str(response_str)))
| [
"[email protected]"
] | |
73b30fa6fa0f6df8bb02c9afbdac48872b446ba9 | 236932e8a722e2c6b2d99cb788629b6fffcddf6f | /app/api/repository/admin.py | 00af0ac2b6407fc1a3ae19add5bfc9dd8e08bf52 | [
"MIT"
] | permissive | Mohammed785/Emergency-Numbers-fastapi | 44c9c01fbf428bebc8068ec6059b76f5dd342739 | 345a6a77eea36e5dcac34b103ddfe0f0a7d17bb6 | refs/heads/main | 2023-06-25T19:49:43.264881 | 2021-07-28T14:43:11 | 2021-07-28T14:43:11 | 390,382,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | from sqlalchemy.orm import Session
from api import schemas,models
from fastapi import HTTPException,status
from api.hashing import Hash
def create_admin(request:schemas.Admin,db:Session):
new_admin = models.Admin(name=request.name,email=request.email,password=Hash.bcrypt(request.password))
db.add(new_admin)
db.commit()
db.refresh(new_admin)
return new_admin
def show_admin(id:int,db:Session):
admin = db.query(models.Admin).filter(models.Admin.id==id).first()
if not admin:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"User with the id {id} is not available")
return admin
| [
"[email protected]"
] | |
c4f55290ac050d4d7702f82296d31175d355205c | c9167b470a1b88b13dd5b7816d1c4226b46830a0 | /src/data/decompress.py | ef7d1d04fb95faaf349d5ebdfc740120c233d862 | [
"MIT"
] | permissive | aldopareja/instacart_kaggle | 35e40652baf64b966dccdc9388cab35e32b0a3bb | 5d8c3c938cb0f1feabca60ac3c566147c2e5073c | refs/heads/master | 2021-01-21T18:34:06.006369 | 2017-05-23T09:28:13 | 2017-05-23T09:28:13 | 92,060,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | import argparse, glob, os
import warnings as wr
import zipfile as zp
def main(inputDir,outputDir):
inputFiles = glob.glob(os.path.join(inputDir,'*.zip'))
if not os.path.exists(outputDir):
os.makedirs(outputDir)
else:
wr.warn('the output directory already exists\n directory will be cleaned')
out = glob.glob(os.path.join(outputDir,'*'))
for file in out:
os.remove(file)
for file in inputFiles:
file = zp.ZipFile(file, 'r')
file.extractall(outputDir)
file.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='script for decompressing the data files')
parser.add_argument('-i', '--input_directory', type=str,default=None ,help='The path to the input directory')
parser.add_argument('-o', '--output_directory', type=str,default=None ,help='The output directory')
'''
python decompress.py -i data/raw/
-o data/raw/decompressed
'''
args = parser.parse_args()
inputDir = args.input_directory
outputDir = args.output_directory
main(inputDir,outputDir) | [
"[email protected]"
] | |
8d58a097e187616f13d699d1b11dc320226dbd01 | fb39c779c7f05857865f57247b8a89bc81839154 | /tests/unit/test_model.py | 60bb4a7e443cdb30793938883e2e521a8f8437c8 | [
"BSD-3-Clause"
] | permissive | daoo/autodesk | 41339d596fb94ead1ed3bcff9b81c131a3d4e7dd | 0d7780d0cfff366ba51f222bec2ef41d24f3b776 | refs/heads/master | 2023-06-10T00:28:53.282480 | 2023-05-30T08:21:02 | 2023-05-30T08:21:02 | 87,717,309 | 13 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,534 | py | from autodesk.model import Model
from autodesk.sqlitedatastore import SqliteDataStore
from autodesk.states import UP, DOWN, ACTIVE, INACTIVE
from pandas import Timestamp, Timedelta
from pandas.testing import assert_frame_equal
from tests.stubdatastore import StubDataStore
import pandas as pd
import pytest
def make_spans(records):
return pd.DataFrame(records, columns=['start', 'end', 'state'])
@pytest.fixture()
def inmemory_model():
model = Model(SqliteDataStore(':memory:'))
yield model
model.close()
def test_get_desk_spans_empty():
t1 = Timestamp.min
t2 = Timestamp.max
model = Model(StubDataStore.empty())
result = model.get_desk_spans(t1, t2)
expected = make_spans([(t1, t2, DOWN)])
assert_frame_equal(result, expected)
def test_get_session_spans_empty():
t1 = Timestamp.min
t2 = Timestamp.max
model = Model(StubDataStore.empty())
result = model.get_session_spans(t1, t2)
expected = make_spans([(t1, t2, INACTIVE)])
assert_frame_equal(result, expected)
def test_get_desk_spans_one_up_span():
t1 = Timestamp(2018, 1, 1)
t2 = Timestamp(2018, 1, 2)
t3 = Timestamp(2018, 1, 3)
model = Model(StubDataStore(
session_events=[],
desk_events=[(t2, UP)]
))
result = model.get_desk_spans(t1, t3)
expected = make_spans([(t1, t2, DOWN), (t2, t3, UP)])
assert_frame_equal(result, expected)
def test_get_session_spans_one_active_span():
t1 = Timestamp(2018, 1, 1)
t2 = Timestamp(2018, 1, 2)
t3 = Timestamp(2018, 1, 3)
model = Model(StubDataStore(
session_events=[(t2, ACTIVE)],
desk_events=[]
))
result = model.get_session_spans(t1, t3)
expected = make_spans([(t1, t2, INACTIVE), (t2, t3, ACTIVE)])
assert_frame_equal(result, expected)
def test_get_session_state_empty():
model = Model(StubDataStore.empty())
assert model.get_session_state() == INACTIVE
def test_get_desk_state_empty():
model = Model(StubDataStore.empty())
assert model.get_desk_state() == DOWN
def test_get_active_time_empty():
model = Model(StubDataStore.empty())
assert model.get_active_time(Timestamp.min, Timestamp.max) == Timedelta(0)
def test_get_active_time_active_zero():
t = Timestamp(2018, 1, 1)
model = Model(StubDataStore(
session_events=[(t, ACTIVE)],
desk_events=[]
))
assert model.get_active_time(Timestamp.min, t) == Timedelta(0)
def test_get_active_time_active_for_10_minutes():
t1 = Timestamp(2018, 1, 1, 0, 0, 0)
t2 = Timestamp(2018, 1, 1, 0, 10, 0)
model = Model(StubDataStore(
session_events=[(t1, ACTIVE)],
desk_events=[]
))
assert model.get_active_time(Timestamp.min, t2) == Timedelta(minutes=10)
def test_get_active_time_just_after_desk_change():
t1 = Timestamp(2018, 1, 1, 0, 0, 0)
t2 = Timestamp(2018, 1, 1, 0, 10, 0)
model = Model(StubDataStore(
session_events=[(t1, ACTIVE)],
desk_events=[(t2, UP)]
))
assert model.get_active_time(Timestamp.min, t2) == Timedelta(0)
def test_get_active_time_active_20_minutes_with_changed_desk_state():
t1 = Timestamp(2018, 1, 1, 0, 0, 0)
t2 = Timestamp(2018, 1, 1, 0, 10, 0)
t3 = Timestamp(2018, 1, 1, 0, 20, 0)
model = Model(StubDataStore(
session_events=[(t1, ACTIVE)],
desk_events=[(t2, UP)]
))
assert model.get_active_time(Timestamp.min, t3) == Timedelta(minutes=10)
def test_compute_hourly_count_active_30_minutes():
t1 = Timestamp(2017, 4, 12, 10, 0, 0)
t2 = Timestamp(2017, 4, 12, 10, 30, 0)
model = Model(StubDataStore(
session_events=[(t1, ACTIVE), (t2, INACTIVE)],
desk_events=[]
))
result = model.compute_hourly_count(t1, t2)
specific_hour = result[
(result.weekday == 'Wednesday') & (result.hour == 10)
]
assert specific_hour.counts.iloc[0] == 1
def test_compute_hourly_count_active_0_minutes():
t1 = Timestamp(2017, 4, 12, 10, 0, 0)
t2 = Timestamp(2017, 4, 12, 10, 30, 0)
model = Model(StubDataStore(
session_events=[(t1, INACTIVE)],
desk_events=[]
))
result = model.compute_hourly_count(t1, t2)
assert result.counts.sum() == 0
def test_set_session_state_active(inmemory_model):
t1 = Timestamp(2018, 1, 1)
t2 = Timestamp(2018, 1, 2)
inmemory_model.set_session(t1, ACTIVE)
expected = make_spans([(t1, t2, ACTIVE)])
assert inmemory_model.get_session_state() == ACTIVE
assert_frame_equal(inmemory_model.get_session_spans(t1, t2), expected)
def test_set_session_state_inactive(inmemory_model):
t1 = Timestamp(2018, 1, 1)
t2 = Timestamp(2018, 1, 2)
inmemory_model.set_session(t1, INACTIVE)
expected = make_spans([(t1, t2, INACTIVE)])
assert inmemory_model.get_session_state() == INACTIVE
assert_frame_equal(inmemory_model.get_session_spans(t1, t2), expected)
def test_set_desk_state_up(inmemory_model):
t1 = Timestamp(2018, 1, 1)
t2 = Timestamp(2018, 1, 2)
inmemory_model.set_desk(t1, UP)
expected = make_spans([(t1, t2, UP)])
assert inmemory_model.get_desk_state() == UP
assert_frame_equal(inmemory_model.get_desk_spans(t1, t2), expected)
def test_set_desk_state_down(inmemory_model):
t1 = Timestamp(2018, 1, 1)
t2 = Timestamp(2018, 1, 2)
inmemory_model.set_desk(t1, DOWN)
expected = make_spans([(t1, t2, DOWN)])
assert inmemory_model.get_desk_state() == DOWN
assert_frame_equal(inmemory_model.get_desk_spans(t1, t2), expected)
| [
"[email protected]"
] | |
0f8f22afe79ca0f7ef6844d4058734115cf9216e | b79547dd150df2fd77a7b9a323623cd6a42b8e25 | /djpsilobus/bin/login_post.py | 87ffe40f7fe459651cd00cb51621e5d50ee66b23 | [
"MIT"
] | permissive | carthage-college/django-djpsilobus | 030d704d7345cdcb993f378c644fdfacb1fa5fff | 386bc17a845247be3049f98f4d38ff49cb54d60c | refs/heads/master | 2023-08-06T17:37:25.021935 | 2023-07-26T17:53:28 | 2023-07-26T17:53:28 | 62,468,994 | 2 | 0 | MIT | 2023-02-24T18:19:35 | 2016-07-02T20:41:30 | Python | UTF-8 | Python | false | false | 456 | py | from django.conf import settings
import json
import requests
url = "https://dspace.carthage.edu/rest/login"
email = settings.DSPACE_EMAIL
password = settings.DSPACE_PASSWORD
request_dict = {
"email":"{}".format(email),
"password":"{}".format(password)
}
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(request_dict), headers=headers)
#print response.__dict__
print "token:\n\n"
print response._content
| [
"[email protected]"
] | |
7be461064c746befdfb6cb0df87450ee4a003cea | a134172173dbc976004729b38ea8d7c081bf374d | /tests/components/arcam_fmj/test_device_trigger.py | 0f2cfaf28932ce3a32878f8e12243142ca282404 | [
"Apache-2.0"
] | permissive | uniosmarthome/home-assistant-core | 910cc44f53381222ffe5543b43940875046bc738 | dd56b1c17606fff40968940a27db6f64bb71abec | refs/heads/dev | 2023-04-13T11:20:24.592193 | 2021-04-11T07:30:08 | 2021-04-11T07:30:08 | 295,188,670 | 1 | 0 | Apache-2.0 | 2021-04-11T07:35:08 | 2020-09-13T16:14:03 | Python | UTF-8 | Python | false | false | 2,833 | py | """The tests for Arcam FMJ Receiver control device triggers."""
import pytest
from homeassistant.components.arcam_fmj.const import DOMAIN
import homeassistant.components.automation as automation
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a arcam_fmj."""
config_entry = MockConfigEntry(domain=DOMAIN, data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={(DOMAIN, "host", 1234)},
)
entity_reg.async_get_or_create(
"media_player", DOMAIN, "5678", device_id=device_entry.id
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": "media_player.arcam_fmj_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_turn_on_request(hass, calls, player_setup, state):
"""Test for turn_on and turn_off triggers firing."""
state.get_power.return_value = None
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": player_setup,
"type": "turn_on",
},
"action": {
"service": "test.automation",
"data_template": {"some": "{{ trigger.entity_id }}"},
},
}
]
},
)
await hass.services.async_call(
"media_player",
"turn_on",
{"entity_id": player_setup},
blocking=True,
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == player_setup
| [
"[email protected]"
] | |
566568612fe98c4ea477080279b41dc69ea8f43f | b2c7e3c42474d4285494c4933d11e5dbe419852b | /scrapy_pyppeteer_cloud_example/spiders/books.py | 734374de4726e450f3495bbc878d2b87d1b142c1 | [] | no_license | elacuesta/scrapy-pyppeteer-cloud-example | 274b49f77eb9ef23a0b98baae9dbc33499adab18 | b61e125369a3a19e5e819f2328616337e5280069 | refs/heads/master | 2022-09-08T17:33:51.367722 | 2020-05-28T23:12:45 | 2020-05-28T23:12:45 | 257,968,267 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | import logging
from scrapy import Spider, Request
logging.getLogger("pyppeteer").setLevel(logging.INFO)
logging.getLogger("websockets").setLevel(logging.INFO)
class BooksSpider(Spider):
name = "books"
pyppeteer = False
def start_requests(self):
yield Request("http://books.toscrape.com", meta={"pyppeteer": self.pyppeteer})
def parse(self, response):
self.logger.info("Parsing page %s", response.url)
yield from response.follow_all(
css="article.product_pod h3 a",
callback=self.parse_book,
meta={"pyppeteer": self.pyppeteer},
)
yield from response.follow_all(css="li.next a", meta={"pyppeteer": self.pyppeteer})
def parse_book(self, response):
return {
"url": response.url,
"title": response.css("h1::text").get(),
"price": response.css("p.price_color::text").re_first(r"(\d+.?\d*)"),
}
| [
"[email protected]"
] | |
8919b63ee7591ecd3059914bcfe5811ad08e1727 | 46be8164d8c1b5649d893452b783fe61c1c674a8 | /materialize-css-django-material-admin/step1/djmaterial/djmaterial/urls.py | f490e948e44253b5f9cc7248ff6a53f1b6218590 | [
"MIT"
] | permissive | akashmurai/blog-code-examples | 751f5c2d68a7dcf29127ddefa2f508b3470bd93f | a6afcb874e88086686071aa1b2a47548aed5a2b0 | refs/heads/master | 2023-04-16T14:14:32.573829 | 2021-04-23T11:46:47 | 2021-04-23T11:46:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | """djmaterial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
1173a142111504e53493c4bd0165aebe53db3e8f | d266b3e57a31970b2b7a04de9682c88466b92e3c | /server.py | c09fb66ab0585ab1dccbd837758e6bbf26e11e52 | [
"MIT"
] | permissive | wangyidong3/stream_server | 617f6a99fe6d73827ebe734570119cdd70c674c7 | 08626af790b8c5c2067e849ba66efb848c72ab3e | refs/heads/master | 2021-03-13T23:27:20.820489 | 2020-03-16T22:27:55 | 2020-03-16T22:27:55 | 246,720,521 | 0 | 0 | MIT | 2020-03-15T02:02:40 | 2020-03-12T01:56:08 | Python | UTF-8 | Python | false | false | 2,036 | py | import socket
from threading import Thread
import base64
import numpy as np
import cv2
import pytest
# server
SERVER_IP = "0.0.0.0"
SERVER_PORT = 953
MAX_NUM_CONNECTIONS = 20
# image
IMAGE_HEIGHT = 480
IMAGE_WIDTH = 640
COLOR_PIXEL = 3 # RGB
class ConnectionPool(Thread):
def __init__(self, ip_, port_, conn_):
Thread.__init__(self)
self.ip = ip_
self.port = port_
self.conn = conn_
print("[+] New server socket thread started for " + self.ip + ":" + str(self.port))
def run(self):
count = 0
try:
while True:
data = self.conn.recv(IMAGE_HEIGHT * IMAGE_WIDTH * COLOR_PIXEL)
if not data:
break
print(count, len(data))
count += 1
except Exception as e:
print("Connection lost with " + self.ip + ":" + str(self.port) + "\r\n[Error] " + str(e.message))
self.conn.close()
def test_server():
x=5
y=6
assert x+1 == y,"test successed"
# assert x == y,"test failed"
def connection():
print("Waiting connections...")
socket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
socket_server.bind((SERVER_IP, SERVER_PORT))
socket_server.listen(MAX_NUM_CONNECTIONS)
while True:
(conn, (ip, port)) = socket_server.accept()
thread = ConnectionPool(ip, port, conn)
thread.start()
    socket_server.close()
if __name__ == '__main__':
    connection()
"[email protected]"
] | |
ffd63545b4349859bd33d3aa16a8067b635fba86 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/421/usersdata/339/85926/submittedfiles/tomadas.py | 58100134123c7a8720056a903605fd365548af5d | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | # -*- coding: utf-8 -*-
import math
# START YOUR CODE HERE
T1 = int(input('Entrada da régua1: '))
T2 = int(input('Entrada da régua2: '))
T3 = int(input('Entrada da régua3: '))
T4 = int(input('Entrada da régua4: '))
# The first three power strips each give up one socket to plug in the next one
Tn = (T1-1)+(T2-1)+(T3-1)+T4
print (Tn)
| [
"[email protected]"
] | |
ecaf7de961e10b10ff2ae37afcc53e369d519f69 | 2f378e9425dcc0d4c6f57497ea4e866311499f2f | /luceole_coop_db/models/share.py | d0d73ffb51f2f60e8e546e5b41737ffe5e982909 | [] | no_license | tcarion/luceole_git | 0adc3b1d7934726811c96d2544cba5222aa5eb71 | cc2af9cbe52cb66fd5d2375062ada6094f280943 | refs/heads/master | 2020-03-27T09:23:01.554506 | 2019-05-03T17:39:26 | 2019-05-03T17:39:26 | 146,336,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | from sqlalchemy import Column, String, Float
from model import Base
import uuid
class Share(Base):
__tablename__ = "share"
share_id = Column(String(50), primary_key=True)
share_name = Column(String(50), nullable=False)
share_value = Column(Float, nullable=False)
def __init__(self, share_id=None, share_name=None, share_value=None):
if share_id is None:
            self.share_id = str(uuid.uuid4())  # column is String(50), so store the text form
else:
self.share_id = share_id
self.share_name = share_name
self.share_value = share_value
def __repr__(self):
return "<Share %s %s>" % (self.share_id, self.share_name)
| [
"[email protected]"
] | |
27d432abd0604bcf61d61a54c35a8adcd68edc9c | 83e85c99ebe9c55f88ea5c5232f0c299ba051542 | /chap12.1.py | 2c2388a4467f036e9b40d47c72a0489b95e98082 | [] | no_license | e-allen/python_files | c69bc5d006cc972ad7a3d1afeb54342a089f7056 | c5ca5d64e6ea57da5925bb5a9fd6432db205594b | refs/heads/master | 2020-03-30T17:18:03.548301 | 2018-10-03T17:24:07 | 2018-10-03T17:24:07 | 151,449,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py |
class Apple():
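    # Minimal example class: each Apple records weight, color, taste, and stem state.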
def __init__(self, w, c, t, s):
self.weight = w
self.color = c
self.taste = t
self.stem = s
print("Created!")
ap1 = Apple(4, "dark red", "sweet", "no stem")
ap2 = Apple(10, "yellow", "sweet", "stem")
ap3 = Apple(5, "green", "sour", "stem")
ap4 = Apple(8, "red and yellow", "semi sweet", "no stem")
| [
"[email protected]"
] | |
b804240baaa864ffd6fa64fef76ebb4960996289 | 40bfe140480d42750aa4a28fa07c2d25c6c51891 | /apps/profile_app/tests/test_views.py | ccbc3839575c4f04bdcf8dd3b20a1280ac20414c | [] | no_license | kuzentio/mitra | bed5750a1f34bbfeab48aaaa1ca6907b0a04abd8 | f1da84ca5657c8741141cff145487fa6e29b5cfe | refs/heads/master | 2023-04-11T07:16:11.978486 | 2019-03-19T16:22:57 | 2019-03-19T16:22:57 | 135,567,211 | 0 | 1 | null | 2021-03-01T15:49:57 | 2018-05-31T10:09:35 | Python | UTF-8 | Python | false | false | 745 | py | from django.test import TestCase, Client
from django.urls import reverse
from apps.profile_app.factories import AccountFactory, UserFactory
from apps.profile_app.models import Account
client = Client()
class TestStrategyListView(TestCase):
def setUp(self):
self.user = UserFactory()
self.user.set_password('123')
self.user.save()
self.account = AccountFactory(user=self.user)
def test_account_list_view_success(self):
client.login(username=self.user.username, password='123')
response = client.get(reverse('profile_app:accounts'))
self.assertEqual(
list(response.context[0]['account_list']), list(Account.objects.filter(user=self.user, is_active=True))
)
| [
"[email protected]"
] | |
b7b57b92183ffd683c12900630b9833c1e6799d2 | 552d32dcc039b610f39f5b34f8c470f68045ef1f | /main.py | 175b95a3005aa834c7a0667ed3eb9b06b6199434 | [] | no_license | mitchgu/3DLatticeUtility | c4f127a82337c1ccc74fb0e92ac2776fa5eddcbd | f6d9b5f8760d8c0c029d99192d66a1a7b4b47e5d | refs/heads/master | 2020-05-18T09:42:19.752173 | 2015-06-19T19:16:51 | 2015-06-19T19:16:51 | 37,342,286 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,944 | py | import lattice as lt
from lattice_renderer import LatticeRenderer
from toolpathing import Toolpath
import sys, cProfile, math
def main():
if len(sys.argv) != 3:
sys.exit("Please supply two arguments: the path to the exported mesh nodes and the path to the exported stresses.")
node_file_path = sys.argv[1]
stress_file_path = sys.argv[2]
cunit_size = float(raw_input("Enter cube unit size (mm) [Default 10mm]: ") or 10)
mesh_size = float(raw_input("Enter resolution of mesh (mm) [Default is cube unit size]: ") or cunit_size)
stress_scale = float(raw_input("Enter stress magnitude scale [Default 1]: ") or 1)
extrude_width = float(raw_input("Enter filament extrusion width [Default 1mm]: ") or 1.0)
render_method = raw_input("Use dynamic lattice loading? (slower) [Y or N] [Default N]: ") or 'N'
render_dynamic_lattice = True if render_method == 'Y' or render_method == 'y' else False
generate_toolpath = raw_input("Generate toolpath? [Y or N] [Default N]: ") or 'N'
generate_toolpath = True if generate_toolpath == 'Y' or generate_toolpath == 'y' else False
max_n = int(math.floor(cunit_size / extrude_width) - 1)
print "Creating a stress mesh from FEA files"
stress_mesh = lt.StressMesh(node_file_path,stress_file_path, mesh_size)
print "Generating a lattice from stress mesh and parameters given"
lattice = lt.Lattice(stress_mesh, cunit_size, max_n, stress_scale)
if generate_toolpath:
print "Generating toolpath"
toolpath = Toolpath(lattice)
print "Setting up the visualization window"
lr = LatticeRenderer()
if generate_toolpath:
print "Loading the toolpath into the visualization"
lr.load_toolpath(toolpath)
print "Loading the lattice into the visualization"
if render_dynamic_lattice:
lr.load_dynamic_lattice(lattice)
else:
lr.load_lattice(lattice)
print "Running visualization"
lr.render()
if __name__ == '__main__':
main()
#cProfile.run('main()') | [
"[email protected]"
] | |
4d2cf215d2757bcd45ce921c6ca8dddf36b51ee2 | 563a2232fd932679853902aacb5f520513d33677 | /main.py | 7e90c7caa5cca3f5601572262c89f6fcdf1e4040 | [] | no_license | MRNP/DeepXS | 2b6427df3565d9138434ce31c690304408ff9446 | cbb3919c44d56c174357ec430d67c9922700755d | refs/heads/master | 2020-04-16T13:47:59.614068 | 2018-12-12T17:13:16 | 2018-12-12T17:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,931 | py | import argparse
import os, sys
from preprocessing import preprocess_c1c1, preprocess_n2n2, preprocess_n2c1p, preprocess_n2c1m
from AI_builder import build_c1c1_AI, build_n2n2_AI, build_n2c1p_AI, build_n2c1m_AI
from predictors import predict_c1c1, predict_n2n2, predict_n2c1p, predict_n2c1m
from reader import read_input
from keras import backend as K
import numpy as np
"""Essential Functions"""
#argparse for indicating the pairs to predict
parser = argparse.ArgumentParser(description='DeepXS - predicting cross-sections since 2018')
parser.add_argument('-pairs', dest='integers', type=int, default=0,
help='Defines the pair(s). Check readme.txt for the specification.')
parser.add_argument('-return', dest='order', action='store', default='both',
help='Specifies if you want the cross-sections at leading (LO) or next-to-leading order (NLO). If you want both, specify -return both')
parser.add_argument('-stream', dest='stream', action='store', default=1, help='Specifies if you want the tensorflow session to continue. 0: no streaming. 1: You will then have the possibility to continuously feed input data as DeepXS waits for new input. 2: DeepXS will predict the cross-sections for every model in slha_dump.')
parser.add_argument('-array', dest='array', action='store', default=0, help='Declares if you want to give the input as an array (1) from a .txt file or not (0) and use an SLHA-file instead.')
parser.add_argument('-fn', dest='fn', action='store', type=str, help='Specifies the .txt file that DeepXS reads to predict the corresponding cross-section(s). The required layout is described in the readme.txt.')
args = parser.parse_args()
if int(args.integers) < 15 and int(args.integers) >= 0:
setting = int(args.integers)
print('You have specified to return the cross-section for pair combination ' + str(args.integers) + '.')
else:
print('Please specify an integer that is between 0 and 14.')
exit()
if args.order == 'LO':
print('Summoning the AI to predict the leading order cross-section.')
LO=1
NLO=0
elif args.order == 'NLO':
print('Summoning the AI to predict the next-to-leading order cross-section.')
LO=0
NLO=1
elif args.order == 'both':
print('Summoning the AI to predict the leading and next-to-leading order cross-section.')
LO=1
NLO=1
else:
print('Please specify either if you want me to return the LO, NLO or both cross-sections. You can do this by running the script via "python DeepXS.py PAIR_NUMBER -return LO/NLO/both". E.g. if you want to predict all pairs at NLO, do "python DeepXS.py 0 -return both".')
exit()
if int(args.array) == 0:
print('Reading input from SLHA file')
array=0
elif int(args.array) == 1:
print('Reading input from an array')
array=1
else:
print('Please either specify -array 0 or -array 1.')
exit()
if int(args.stream) == 0:
active = False
elif int(args.stream) == 1:
print('Opening a continuous portal to our AI for continuous streaming of input and output.')
active = True
elif int(args.stream) == 2:
print('Opening a continuous portal to our AI for continuous streaming of input and output.')
else:
print('Please specify either -stream 0, -stream 1 or -stream 2.')
exit()
fn = str(args.fn)
print('\n Welcome to DeepXS version Alpha 2\n')
""" MAIN """
#read input
input_data = read_input(fn, array=array)
#pair settings
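# Each list enumerates the -pairs codes that include the corresponding production channel.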
c1c1_occurences = [0,1,5,6,7,11,12,13]
n2n2_occurences = [0,2,5,8,9,11,12,14]
n2c1p_occurences = [0,3,6,8,10,11,13,14]
n2c1m_occurences = [0,4,7,9,10,12,13,14]
if int(args.stream) == 0:
#c1c1
for i in c1c1_occurences:
if i == setting:
c1c1_LO_data, c1c1_NLO_data = preprocess_c1c1(input_data, LO=1, NLO=1, array=array)
c1c1_LO_model, c1c1_NLO_model = build_c1c1_AI(LO_weightfile='./c1c1/c1c1_LO.hdf5', K_weightsfile='./c1c1/c1c1_K.hdf5', LO=1, NLO=1)
c1c1_LO, c1c1_K, c1c1_NLO = predict_c1c1(LO_data=c1c1_LO_data,NLO_data=c1c1_NLO_data,LO_model=c1c1_LO_model,NLO_model=c1c1_NLO_model,LO=1,NLO=1)
#n2n2
for i in n2n2_occurences:
if i == setting:
n2n2_LO_data, n2n2_NLO_data = preprocess_n2n2(input_data, LO=1, NLO=1, array=array)
n2n2_LO_model, n2n2_NLO_model = build_n2n2_AI(LO_weightfile='./n2n2/n2n2_LO.hdf5', K_weightsfile='./n2n2/n2n2_K.hdf5', LO=0, NLO=1)
n2n2_LO, n2n2_K, n2n2_NLO = predict_n2n2(LO_data=n2n2_LO_data,NLO_data=n2n2_NLO_data,LO_model=n2n2_LO_model,NLO_model=n2n2_NLO_model,LO=0,NLO=1)
#n2c1p
for i in n2c1p_occurences:
if i == setting:
n2c1p_LO_data, n2c1p_NLO_data = preprocess_n2c1p(input_data, LO=1, NLO=1, array=array)
n2c1p_LO_model_general, n2c1p_LO_model_specialised, n2c1p_NLO_model_general, n2c1p_NLO_model_specialised = build_n2c1p_AI(LO_gen_weightfile='./n2c1p/n2c1+_LO_gen.hdf5', LO_spec_weightfile='./n2c1p/n2c1+_LO_spec.hdf5', K_gen_weightsfile='./n2c1p/n2c1+_K_gen.hdf5', K_spec_weightsfile='./n2c1p/n2c1+_K_spec.hdf5', LO=1, NLO=1)
n2c1p_LO, n2c1p_K, n2c1p_NLO = predict_n2c1p(LO_data=n2c1p_LO_data,NLO_data=n2c1p_NLO_data,LO_model_gen=n2c1p_LO_model_general, LO_model_spec=n2c1p_LO_model_specialised, NLO_model_gen=n2c1p_NLO_model_general, NLO_model_spec = n2c1p_NLO_model_specialised, LO=1,NLO=1)
#n2c1m
for i in n2c1m_occurences:
if i == setting:
n2c1m_LO_data, n2c1m_NLO_data = preprocess_n2c1m(input_data, LO=1, NLO=1, array=array)
n2c1m_LO_model_gen, n2c1m_LO_model_spec, n2c1m_NLO_model = build_n2c1m_AI(LO_gen_weightfile='./n2c1m/n2c1-_LO_gen.hdf5', LO_spec_weightfile='./n2c1m/n2c1-_LO_spec.hdf5', K_weightsfile='./n2c1m/n2c1-_K.hdf5', LO=1, NLO=1)
n2c1m_LO, n2c1m_K, n2c1m_NLO = predict_n2c1m(LO_data=n2c1m_LO_data,NLO_data=n2c1m_NLO_data,LO_model_gen=n2c1m_LO_model_gen,LO_model_spec=n2c1m_LO_model_spec,NLO_model=n2c1m_NLO_model,LO=1,NLO=1)
if int(args.stream) == 1:
while active == True:
#c1c1
for i in c1c1_occurences:
if i == setting:
c1c1_LO_data, c1c1_NLO_data = preprocess_c1c1(input_data, LO=1, NLO=1, array=array)
c1c1_LO_model, c1c1_NLO_model = build_c1c1_AI(LO_weightfile='./c1c1/c1c1_LO.hdf5', K_weightsfile='./c1c1/c1c1_K.hdf5', LO=1, NLO=1)
c1c1_LO, c1c1_K, c1c1_NLO = predict_c1c1(LO_data=c1c1_LO_data,NLO_data=c1c1_NLO_data,LO_model=c1c1_LO_model,NLO_model=c1c1_NLO_model,LO=1,NLO=1)
#n2n2
for i in n2n2_occurences:
if i == setting:
n2n2_LO_data, n2n2_NLO_data = preprocess_n2n2(input_data, LO=1, NLO=1, array=array)
n2n2_LO_model, n2n2_NLO_model = build_n2n2_AI(LO_weightfile='./n2n2/n2n2_LO.hdf5', K_weightsfile='./n2n2/n2n2_K.hdf5', LO=0, NLO=1)
n2n2_LO, n2n2_K, n2n2_NLO = predict_n2n2(LO_data=n2n2_LO_data,NLO_data=n2n2_NLO_data,LO_model=n2n2_LO_model,NLO_model=n2n2_NLO_model,LO=0,NLO=1)
#n2c1p
for i in n2c1p_occurences:
if i == setting:
n2c1p_LO_data, n2c1p_NLO_data = preprocess_n2c1p(input_data, LO=1, NLO=1, array=array)
n2c1p_LO_model_general, n2c1p_LO_model_specialised, n2c1p_NLO_model_general, n2c1p_NLO_model_specialised = build_n2c1p_AI(LO_gen_weightfile='./n2c1p/n2c1+_LO_gen.hdf5', LO_spec_weightfile='./n2c1p/n2c1+_LO_spec.hdf5', K_gen_weightsfile='./n2c1p/n2c1+_K_gen.hdf5', K_spec_weightsfile='./n2c1p/n2c1+_K_spec.hdf5', LO=1, NLO=1)
n2c1p_LO, n2c1p_K, n2c1p_NLO = predict_n2c1p(LO_data=n2c1p_LO_data,NLO_data=n2c1p_NLO_data,LO_model_gen=n2c1p_LO_model_general, LO_model_spec=n2c1p_LO_model_specialised, NLO_model_gen=n2c1p_NLO_model_general, NLO_model_spec = n2c1p_NLO_model_specialised, LO=1,NLO=1)
#n2c1m
for i in n2c1m_occurences:
if i == setting:
n2c1m_LO_data, n2c1m_NLO_data = preprocess_n2c1m(input_data, LO=1, NLO=1, array=array)
n2c1m_LO_model_gen, n2c1m_LO_model_spec, n2c1m_NLO_model = build_n2c1m_AI(LO_gen_weightfile='./n2c1m/n2c1-_LO_gen.hdf5', LO_spec_weightfile='./n2c1m/n2c1-_LO_spec.hdf5', K_weightsfile='./n2c1m/n2c1-_K.hdf5', LO=1, NLO=1)
n2c1m_LO, n2c1m_K, n2c1m_NLO = predict_n2c1m(LO_data=n2c1m_LO_data,NLO_data=n2c1m_NLO_data,LO_model_gen=n2c1m_LO_model_gen,LO_model_spec=n2c1m_LO_model_spec,NLO_model=n2c1m_NLO_model,LO=1,NLO=1)
#request new file
new_SLHA_file = input("Type 'exit' to exit.\n Otherwise, specify the file to process next: ")
        if new_SLHA_file == 'exit':
active = False
else:
input_data = read_input(new_SLHA_file, array=0)
if int(args.stream) == 2:
#in the next version convert list of SLHA inputs into array and then use NNs only once. The current way is unnecessarily slow.
SLHA_file_list = []
for x in os.listdir('./SLHA_dump'):
z = './SLHA_dump/' + str(x)
SLHA_file_list.append(z)
print(SLHA_file_list)
print(np.shape(SLHA_file_list))
    for idx in range(0, len(SLHA_file_list)):
        input_data = read_input(SLHA_file_list[idx], array=0)
#c1c1
for i in c1c1_occurences:
if i == setting:
c1c1_LO_data, c1c1_NLO_data = preprocess_c1c1(input_data, LO=1, NLO=1, array=array)
c1c1_LO_model, c1c1_NLO_model = build_c1c1_AI(LO_weightfile='./c1c1/c1c1_LO.hdf5', K_weightsfile='./c1c1/c1c1_K.hdf5', LO=1, NLO=1)
c1c1_LO, c1c1_K, c1c1_NLO = predict_c1c1(LO_data=c1c1_LO_data,NLO_data=c1c1_NLO_data,LO_model=c1c1_LO_model,NLO_model=c1c1_NLO_model,LO=1,NLO=1)
#n2n2
for i in n2n2_occurences:
if i == setting:
n2n2_LO_data, n2n2_NLO_data = preprocess_n2n2(input_data, LO=1, NLO=1, array=array)
n2n2_LO_model, n2n2_NLO_model = build_n2n2_AI(LO_weightfile='./n2n2/n2n2_LO.hdf5', K_weightsfile='./n2n2/n2n2_K.hdf5', LO=0, NLO=1)
n2n2_LO, n2n2_K, n2n2_NLO = predict_n2n2(LO_data=n2n2_LO_data,NLO_data=n2n2_NLO_data,LO_model=n2n2_LO_model,NLO_model=n2n2_NLO_model,LO=0,NLO=1)
#n2c1p
for i in n2c1p_occurences:
if i == setting:
n2c1p_LO_data, n2c1p_NLO_data = preprocess_n2c1p(input_data, LO=1, NLO=1, array=array)
n2c1p_LO_model_general, n2c1p_LO_model_specialised, n2c1p_NLO_model_general, n2c1p_NLO_model_specialised = build_n2c1p_AI(LO_gen_weightfile='./n2c1p/n2c1+_LO_gen.hdf5', LO_spec_weightfile='./n2c1p/n2c1+_LO_spec.hdf5', K_gen_weightsfile='./n2c1p/n2c1+_K_gen.hdf5', K_spec_weightsfile='./n2c1p/n2c1+_K_spec.hdf5', LO=1, NLO=1)
n2c1p_LO, n2c1p_K, n2c1p_NLO = predict_n2c1p(LO_data=n2c1p_LO_data,NLO_data=n2c1p_NLO_data,LO_model_gen=n2c1p_LO_model_general, LO_model_spec=n2c1p_LO_model_specialised, NLO_model_gen=n2c1p_NLO_model_general, NLO_model_spec = n2c1p_NLO_model_specialised, LO=1,NLO=1)
#n2c1m
for i in n2c1m_occurences:
if i == setting:
n2c1m_LO_data, n2c1m_NLO_data = preprocess_n2c1m(input_data, LO=1, NLO=1, array=array)
n2c1m_LO_model_gen, n2c1m_LO_model_spec, n2c1m_NLO_model = build_n2c1m_AI(LO_gen_weightfile='./n2c1m/n2c1-_LO_gen.hdf5', LO_spec_weightfile='./n2c1m/n2c1-_LO_spec.hdf5', K_weightsfile='./n2c1m/n2c1-_K.hdf5', LO=1, NLO=1)
n2c1m_LO, n2c1m_K, n2c1m_NLO = predict_n2c1m(LO_data=n2c1m_LO_data,NLO_data=n2c1m_NLO_data,LO_model_gen=n2c1m_LO_model_gen,LO_model_spec=n2c1m_LO_model_spec,NLO_model=n2c1m_NLO_model,LO=1,NLO=1)
K.clear_session()
| [
"[email protected]"
] | |
7db380b89b0304b2f834f9eaf4024f367c492bdb | 3f4eda28a7d390eeb6f8196dfba50e8f77f26431 | /bot/bot.py | 84143511b1878b9b51d3ae24196bf4a946d04502 | [] | no_license | SushiPython/comment-leaderboards | d8eb9c2ffd9449e18016db0c9e1792f7e86bd901 | 4499c5560bdb54aa788c037449a50ad9dcc0f4bd | refs/heads/main | 2023-07-01T05:51:22.180098 | 2021-08-10T17:07:36 | 2021-08-10T17:07:36 | 394,316,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,048 | py | from discord.ext import commands
import discord
import pymongo
import datetime
# Customizable Values
comment_channel_id = 0  # placeholder: set this to your Discord channel ID
database_username = ""
database_password = ""
bot_token = ""
mongodb_url = f""
# End of customizable values
c = pymongo.MongoClient(mongodb_url)
db = c.main.entries
prefix = "?"
bot = commands.Bot(command_prefix=prefix)
@bot.event
async def on_ready():
print("websocket active")
@bot.event
async def on_message(message):
if message.author.bot: return
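    # Expected submission format: line 1 = video URL, line 2 = comment text,
    # line 3 (or a message attachment) = screenshot of the comment.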
error = False
    content_lines_split = message.content.split('\n')
    if len(content_lines_split) < 2:
        error = True
        error_reason = "Not all data included"
    else:
        youtube_url = content_lines_split[0]
        comment_content = content_lines_split[1]
        if db.count_documents({"comment": comment_content}) > 0:
            error = True
            error_reason = "Comment already submitted"
    if len(content_lines_split) == 3:
        image_url = content_lines_split[2]
    elif len(message.attachments) > 0:
        image_url = message.attachments[0].url
    else:
        error = True
        error_reason = "Image not attached"
await message.delete()
if error == True:
await message.channel.send(f'<@{message.author.id}> Process error, reason: `{error_reason}`')
else:
embed=discord.Embed(title="Comment Info", description="After 4 reactions, comment will be added")
embed.add_field(name="User", value=message.author.name, inline=False)
embed.add_field(name="Comment", value=comment_content, inline=False)
embed.add_field(name="Video", value=youtube_url, inline=False)
embed.set_image(url=image_url)
sent = await message.channel.send(embed=embed)
        await sent.add_reaction('👍')
db.insert_one({
"videoUrl": youtube_url,
"youtubeId": youtube_url[-11:], #may not always be accurate
"userTag": f"{message.author.display_name}#{message.author.discriminator}",
"userId": message.author.id,
"type": "comment",
"time": datetime.datetime.now(),
"messageId": sent.id,
"verified": False,
"comment": comment_content,
"avatar": message.author.avatar,
"comment_url": image_url
})
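# Verification flow: more than 3 thumbs-up reactions (assumed emoji) mark a submission verified.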
@bot.event
async def on_raw_reaction_add(payload):
if payload.channel_id == comment_channel_id:
        if payload.emoji.name == "👍":
channel = bot.get_channel(comment_channel_id)
message = await channel.fetch_message(payload.message_id)
reaction = discord.utils.get(message.reactions, emoji=payload.emoji.name)
print(reaction.count)
if reaction and reaction.count > 3:
await message.delete()
db.update_one({
"messageId": message.id
},
{"$set": {
"verified": True
}})
await message.channel.send("Comment verified, thanks!")
bot.run(bot_token)
| [
"[email protected]"
] | |
db2901f57c94c05aba568bf3a16e2a6241f83853 | 1db08ed38f3ad5c5357dbad66fed955a74c8434d | /general/management/commands/nobr_bot.py | a2950640bb30145ca983116aefc6187ed0e15da4 | [] | no_license | fizzy123/nobr.me | 347980d70fd6284970dc51ab3c5020003bd6d94d | b7acd4c39390dc6f497bc35b53d547ad04fe275c | refs/heads/master | 2020-06-04T14:10:25.236569 | 2015-12-11T19:31:22 | 2015-12-11T19:31:22 | 28,760,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | from django.core.management.base import BaseCommand, CommandError
from general.functions import nobr_bot
class Command(BaseCommand):
help = 'tweet breetz'
def handle(self, *args, **options):
nobr_bot()
| [
"[email protected]"
] | |
06be0608c54ee8e15f03e8344a98575d0bf9f88d | be918598badb564aa134990276a06c3524317e59 | /chaco/tests/serializable_test_case.py | e364f1a1f87d65b74a7ac910e561ef1fd1abdbce | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | janvonrickenbach/Chaco_wxPhoenix_py3 | 3ac11aaa5a452afa526edaf2c68c166709b94b90 | 21a10cfd81100f28e3fbc273357ac45642519f33 | refs/heads/master | 2020-08-03T20:03:55.983524 | 2019-12-18T10:47:24 | 2019-12-18T10:47:24 | 211,870,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | from pickle import loads, dumps
import unittest
# pickling child classes doesn't work well in the unittest framework unless
# the classes to be pickled are in a different file
from .serializable_base import Circle, Poly
class SimpleSerializationTestCase(unittest.TestCase):
def compare_traits(self, a, b, trait_names=None):
"Checks the traits of objects 'a' and 'b' and makes sure they all match."
if trait_names is None:
trait_names = a.trait_names()
for name in trait_names:
if name in ("trait_added", "trait_modified"):
continue
o1 = getattr(a, name)
o2 = getattr(b, name)
if isinstance(o1, list) or isinstance(o1, tuple):
print("Warning: Cowardly refusing to do deep compares")
else:
self.assertTrue(o1 == o2)
return
def test_basic_save(self):
c = Circle(radius=5.0, name="c1", x=1.0, y=2.0)
c2 = loads(dumps(c))
for attrib in ("tools", "filled", "color", "x", "radius"):
self.assertTrue(getattr(c, attrib) == getattr(c2, attrib))
self.assertEqual(c2.y, 2.0)
return
def test_basic_save2(self):
p = Poly(numside=3, name="poly", x=3.0, y=4.0)
p2 = loads(dumps(p))
for attrib in ("tools", "filled", "color", "x", "numsides", "length"):
self.assertTrue(getattr(p, attrib) == getattr(p2, attrib))
self.assertEqual(p2.y, 4.0)
return
class PlotSerializationTestCase(unittest.TestCase):
pass
if __name__ == '__main__':
import nose
nose.run()
| [
"[email protected]"
] | |
32cc59bc869726623d8967e5572d3c7249cba2cb | 30a4495fa1ebd7c61de1f8850f47b3b0e9f4c0b2 | /Majfunction.py | cb13a603248149858a0839f020567aa67fe387ca | [] | no_license | artenicus/my-first-base | 6e66955135a311a8b61cb43ed744012cb4573b01 | 6d9f88a8e02383cc94f7d34384cd364751ccaeb9 | refs/heads/master | 2022-11-25T03:27:59.494769 | 2020-07-23T06:57:30 | 2020-07-23T06:57:30 | 281,055,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | def siemanko(name):
    print('Hey there, buddy')
    print("what's new in the state of Denmark?")
    metalowcy = ['asterix', 'panoramix']
    for member in metalowcy:
        print('you again? ')
        print('get out of here, ' + member + "!")
siemanko('asterix')
for i in range(1, 9):
print(i)
| [
"[email protected]"
] | |
4a8f297e04d532d82920bb3b2be932db11d69571 | 44edea4cde086d530e59f52e63c26b608fbef3bc | /control_structures_exercises.py | d1402da954f9c472a28136e9a21c54a383ad8a23 | [] | no_license | Jason-Tellez/python-exercises | 7bb17987ae16347502c2e6ec9b75d9b626858822 | 9e5f8db4948ac74675b37e088c2b4a6db5a344ff | refs/heads/master | 2023-06-19T04:29:00.356841 | 2021-07-20T20:35:45 | 2021-07-20T20:35:45 | 384,188,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,150 | py | #1. Conditional Basics
#a. prompt the user for a day of the week, print out whether the day is Monday or not
user_input = input("What day is today?")
if user_input == "Monday":
print("I hate Mondays!")
else:
print('Have a good day!')
#b. prompt the user for a day of the week, print out whether the day is a weekday or a weekend
day = input("What day is today?")
if day == "Monday" or "Tuesday" or "Wednesday" or "Thursday":
print("It's a weekday.")
elif day == "Friday" or "Saturday" or "Sunday":
print("It's the weekend!")
#c. create variables and make up values for
#the number of hours worked in one week
#the hourly rate
#how much the week's paycheck will be
#write the python code that calculates the weekly paycheck. You get paid time and a half if you work more than 40 hours
hours_a_week = 45
hourly_rate = 7.25
paycheck = hourly_rate * hours_a_week
if hours_a_week > 40:
overtime = (hours_a_week - 40) * (hourly_rate * 1.5)
paycheck = 40 * hourly_rate + overtime
print("Your weekly pay is", paycheck)
else:
print("Your weekly pay is", paycheck)
#2. Loop Basics
#a. While
#Create an integer variable i with a value of 5.
#Create a while loop that runs so long as i is less than or equal to 15
#Each loop iteration, output the current value of i, then increment i by one.
i = 5
while i <= 15:
print(i)
i += 1
#Create a while loop that will count by 2's starting with 0 and ending at 100. Follow each number with a new line.
i = 0
while i <= 100:
print(i)
i += 2
#Alter your loop to count backwards by 5's from 100 to -10.
i = 100
while i <= 100 and i >= -10:
print(i)
i -= 5
#Create a while loop that starts at 2, and displays the number squared on each line while the number is less than 1,000,000.
i = 2
while i < 1000000:
print(i)
i = i ** 2
#Write a loop that counts from 100 to 5 in increments of 5.
i = 100
while i <= 100 and i >= 5:
print(i)
i -= 5
#b. #For Loops
#Write some code that prompts the user for a number, then shows a multiplication table up through 10 for that number.
num = input("Gimme a number!")
num = int(num)
for m in range(1, 11):
print(num, 'x', m, '=', num * m)
#Create a for loop that uses print to create the output shown below.
for m in range(1,10):
product = str(m) * m
print(product)
#c break and continue
#Prompt the user for an odd number between 1 and 50. Use a loop and a break statement to continue prompting the user if they enter invalid input.
#(Hint: use the isdigit method on strings to determine this).
#Use a loop and the continue statement to output all the odd numbers between 1 and 50, except for the number the user entered.
number = input("Pick an odd number between 1 and 50.")
while number.isdigit() == False or int(number) % 2 == 0:
print(f"{number} is not a valid entry!")
number = input("Please pick a valid odd number between 1 and 50.")
i_quit = input("If you want to be a quitter, type 'Y'")
if i_quit == "Y" or i_quit == "y":
break
else:
number = input("Please pick a valid odd number between 1 and 50.")
continue
print("Number to skip is:", number)
for i in range(1,51):
if i % 2 == 1 and i != int(number):
print("Here is an odd number:", i)
if i == int(number):
print("Yikes! Skipping number:", number)
#d. The input function can be used to prompt for input and use that input in your python code. Prompt the user to enter a positive number and write a loop that counts from 0 to that number. (Hints: first make sure that the value the user entered is a valid number, also note that the input function returns a string, so you'll need to convert this to a numeric type.)
positive_number = input("Pick a positive integer.")
while positive_number.isdigit() == False or int(positive_number) <= 0 or int(positive_number) % 1 != 0:
print(f"{positive_number} is not a valid input.")
try_again = input("Continue? Y/N?")
if try_again == "N" or try_again == "n":
print("okie dokie")
break
else:
positive_number = input("Pick a positive integer.")
continue
if positive_number.isdigit() and int(positive_number) > 0:
    for num in range(0, int(positive_number) + 1):
        print(num)
#e. Write a program that prompts the user for a positive integer. Next write a loop that prints out the numbers from the number the user entered down to 1.
positive_int = input("Pick a positive integer.")
while positive_int.isdigit() == False or int(positive_int) <= 0 or int(positive_int) % 1 != 0:
print(f"{positive_int} is not a valid input.")
try_again = input("Continue? Y/N?")
if try_again == "N" or try_again == "n":
print("okie dokie")
break
else:
positive_int = input("Pick a positive integer.")
continue
if positive_int.isdigit() and int(positive_int) > 0:
    for num in range(int(positive_int), 0, -1):
        print(num)
#3. Fizzbuzz
#Write a program that prints the numbers from 1 to 100.
#For multiples of three print "Fizz" instead of the number
#For the multiples of five print "Buzz".
#For numbers which are multiples of both three and five print "FizzBuzz".
for num in range(1, 100 + 1):
    if num % 3 == 0 and num % 5 != 0:
        print("Fizz")
    elif num % 5 == 0 and num % 3 != 0:
        print("Buzz")
    elif num % 5 == 0 and num % 3 == 0:
        print("FizzBuzz")
    else:
        print(num)
#4. Display a table of powers.
#Prompt the user to enter an integer.
#Display a table of squares and cubes from 1 to the value entered.
#Ask if the user wants to continue.
#Assume that the user will enter valid data.
#Only continue if the user agrees to.
integer = int(input("Gimme any integer to get a neat table."))
header = " {:^4} " #header, line, and body variable create and space the table when printed
body = " {:^6} "
body2 = " {:^7} "
line = " {:^6} "
line2 = " {:^7} "
print(header.format('number'), "|", header.format('squared'), "|", header.format('cubed'))
print(line.format('------'), "|", line2.format('-------'), "|", line.format('-----'))
confirmation = input("If you want to quit, type 'N'. Else, type anything!")
if confirmation == "N" or confirmation == "n":
    print("okie dokie")
else:
    print("Here you go!")
    if integer >= 1:
        for x in range(1, integer + 1):
            print(body.format(x), "|", body2.format(x ** 2), "|", body.format(x ** 3))
    else:
        for x in range(1, integer - 1, -1):
            print(body.format(x), "|", body2.format(x ** 2), "|", body.format(x ** 3))
#Bonus. Convert given number grades into letter grades.
#Prompt the user for a numerical grade from 0 to 100.
#Display the corresponding letter grade.
#Prompt the user to continue.
#Assume that the user will enter valid integers for the grades.
#The application should only continue if the user agrees to.
#Grade Ranges:
#A : 100 - 88
#B : 87 - 80
#C : 79 - 67
#D : 66 - 60
#F : 59 - 0
number_grade = int(input("What grade did you receive?")) #int() ensures input will be an integer
user_confirmation = input("If you would like to receive your grade, type 'Y'. Otherwise, type anything.") #variable to confirm user want to receive grade
if user_confirmation == "y" or user_confirmation == "Y": #this if condition runs if user wants to receive grade
print("Here is your grade.")
if number_grade >=88 and number_grade <= 100:
print("A")
    elif number_grade >= 80 and number_grade <= 87:
print("B")
elif number_grade >=67 and number_grade <= 79:
print("C")
elif number_grade >=60 and number_grade <= 66:
print("D")
else:
print("F")
else: #if user decides not to receive grade, this else condition will run and user will not receive letter grade
print("Have a nice day!")
#Create a list of dictionaries where each dictionary represents a book that you have read.
# Each dictionary in the list should have the keys title, author, and genre. Loop through the list and print out information about each book.
books = [
{
'title': 'In the Heart of the Sea: The Tragedy of the Whaleship Essex',
'author': 'Nathaniel Philbrick',
'genre': 'History'
},
{
'title': 'What Is the What',
'author': 'Dave Eggers',
'genre': 'fiction'
},
{
'title': 'Green Eggs and Ham',
'author': 'Dr. Seuss',
'genre': 'epic'
},
{
'title': 'Uneducated',
'author': 'Tara Westover',
'genre': 'memoir'
},
{
'title': 'Unbroken: A World War II Story of Survival, Resilience, and Redemption',
'author': 'Laura Hillenbrand',
'genre': 'biography'
}
]
for book in books:
print("Title:", book['title'])
print("Author:", book['author'])
print("Genre:", book['genre'])
print()
#a. Prompt the user to enter a genre, then loop through your books list and print out the titles of all the books in that genre.
user_input = input("Enter a genre!")
for book in books:
if book['genre'] == user_input.lower():
print([book])
else:
print("Try again!")
break | [
"[email protected]"
] | |
98999f24a74028e629158efd13cd6297f5477e1d | c4b9ae6636a09229feaeac94eb35e159cfe96c4d | /env/style_volume.py | 9ea09d97fe4a45e8ea306be791b49caf0d010405 | [] | no_license | milkpku/spacetimeBounds | b1dcd4415967ebb7d36151bfabd69aa69e52e72f | 9ca9b64adb719e0fe9da8e1d35592e6a0fbcbac7 | refs/heads/master | 2023-04-30T11:46:49.420755 | 2021-05-12T09:18:13 | 2021-05-12T09:18:13 | 359,383,266 | 20 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | from env import SpacetimeBoundsEnv
import numpy as np
from Kinematic import KinematicCore
# 1 root(id 0) NONE
# 2 chest(id 1) SPHERICAL
# 3 neck(id 2) SPHERICAL
# 4 right_hip(id 3) SPHERICAL
# 5 right_knee(id 4) REVOLUTE
# 6 right_ankle(id 5) SPHERICAL
# 7 right_shoulder(id 6) SPHERICAL
# 8 right_elbow(id 7) REVOLUTE
# 9 right_wrist(id 8) FIXED
# 10 left_hip(id 9) SPHERICAL
# 11 left_knee(id 10) REVOLUTE
# 12 left_ankle(id 11) SPHERICAL
# 13 left_shoulder(id 12) SPHERICAL
# 14 left_elbow(id 13) REVOLUTE
# 15 left_wrist(id 14) FIXED
convex_nodes = {
"default": list(range(15)),
"upper": [0, 1, 2, 6, 7, 8, 12, 13, 14],
"lower": [0, 3, 4, 5, 9, 10, 11],
"endeffector": [0, 5, 8, 11, 14],
}
class StyleVolumeEnv(SpacetimeBoundsEnv):
def __init__(self, nodes="default", scale=0.12, enlarge=True, **kwargs):
"""
Initialize volume stylize environment
"""
super().__init__(**kwargs)
# joints' weight for penalty
self._nodes = convex_nodes[nodes]
self._scale = scale
self._enlarge = enlarge
def calc_volume(self):
        char = self._skeleton._kin_core.getCharacter()
        return KinematicCore.jointConvexHullVolume(char, self._nodes)
def calc_reward(self):
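        # exp(-vol/scale) decays with hull volume; reward large volumes when enlarging, small ones otherwise.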
vol = self.calc_volume()
r_diff = np.exp(-vol/self._scale)
if self._enlarge:
rwd = (1-r_diff) # encourage volume
else:
rwd = r_diff # discourage volume
return rwd
| [
"[email protected]"
] | |
45b5fd423c306ec58bb3dd03f3941100ccfb5f82 | 2c54d0baba0258f5c409e010efe4a32c0a11c189 | /venv/Scripts/easy_install-script.py | 1c6cf3041f690b5fc6ee9814cc3ab605513402fa | [] | no_license | Rofiatulkhoiriyah/Project-Latihan-Python | dc1c1c6e91ed0f0ebbe86eb62aff461fba1f6ae1 | 254b0ba5eaec2c934d9b9921c8c88974304302e3 | refs/heads/master | 2020-12-28T17:58:27.205824 | 2020-02-05T11:11:04 | 2020-02-05T11:11:04 | 238,431,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!F:\rofi\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
fcdca95a4666d8a74bf02b25831e227dde08b7bb | cad338c91a1fff7d84e4e51a37b6a3bde7c96371 | /dcmrtstruct2nii/adapters/output/niioutputadapter.py | a56379ebfec6bd29e6f6639a198dde82c5088487 | [
"Apache-2.0"
] | permissive | surajpaib/dcmrtstruct2nii | c603388258264a81aa0cdb7ed97984647ad5a627 | e59458290398b99ab9cdbbf81f7c30e97c6e0402 | refs/heads/master | 2020-11-30T15:18:06.935094 | 2019-12-27T16:28:10 | 2019-12-27T16:28:10 | 230,428,153 | 0 | 0 | Apache-2.0 | 2019-12-27T10:59:38 | 2019-12-27T10:59:37 | null | UTF-8 | Python | false | false | 357 | py | from dcmrtstruct2nii.adapters.output.abstractoutputadapter import AbstractOutputAdapter
import SimpleITK as sitk
class NiiOutputAdapter(AbstractOutputAdapter):
def write(self, image, output_path, gzip):
if gzip:
sitk.WriteImage(image, output_path + '.nii.gz')
else:
sitk.WriteImage(image, output_path + '.nii')
| [
"[email protected]"
] | |
f6f46f6bbba0633f0973a126cae7ae5d48d567a8 | fa18fb8de549d4a303d5bad5af9c0fbcb479d546 | /zero_shot/preprocess.py | 28146fd17afa4adc13372f720ede708f40db0d13 | [] | no_license | jirvin16/DLBootCamp | 8ee154e8d065fa0dfbf758effec80fcf2cae66cc | bd02bfee7e08aba14c34c3def6b561c9637e1004 | refs/heads/master | 2021-01-11T04:17:13.955555 | 2016-12-16T02:37:38 | 2016-12-16T02:37:38 | 71,189,177 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,895 | py | from __future__ import division
from __future__ import print_function
import string
import re
import os
from unicodedata import category
from unidecode import unidecode
def preprocess(infile_name, outfile_name, target_language=None):
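    # Normalizes unicode punctuation to ASCII, pads punctuation with spaces, collapses
    # whitespace, and optionally prepends a target-language token such as "<en>".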
i = 0
lines = []
with open(infile_name) as infile:
for line in infile:
line = ''.join(unidecode(ch) if category(ch)[0] == 'P' else ch for ch in line.decode('utf8'))
for ch in string.punctuation:
line = line.replace(ch, " " + ch + " ")
line = re.sub("[\t *]+", " ", line)
i += 1
if i % 10000 == 0:
print(i)
if target_language:
lines.append("<{}> ".format(target_language) + line.strip() + "\n")
else:
lines.append(line.strip() + "\n")
with open(outfile_name, 'wb') as outfile:
outfile.write("".join([line.encode('utf8') for line in lines]))
if not os.path.isdir("/deep/group/dlbootcamp/jirvin16/data"):
os.makedirs("/deep/group/dlbootcamp/jirvin16/data")
if not os.path.isdir("/deep/group/dlbootcamp/jirvin16/final_data"):
os.makedirs("/deep/group/dlbootcamp/jirvin16/final_data")
file_triplets = [("/deep/group/dlbootcamp/jirvin16/fr-en/europarl-v7.fr-en.fr", "/deep/group/dlbootcamp/jirvin16/fr-en/data.fr", "en"),
("/deep/group/dlbootcamp/jirvin16/fr-en/europarl-v7.fr-en.en", "/deep/group/dlbootcamp/jirvin16/fr-en/data.en", None),
("/deep/group/dlbootcamp/jirvin16/en-de/train.en", "/deep/group/dlbootcamp/jirvin16/en-de/data.en", "de"),
("/deep/group/dlbootcamp/jirvin16/en-de/train.de", "/deep/group/dlbootcamp/jirvin16/en-de/data.de", None),
("/deep/group/dlbootcamp/jirvin16/fr-de/valid.fr", "/deep/group/dlbootcamp/jirvin16/data/test.fr", "de"),
("/deep/group/dlbootcamp/jirvin16/fr-de/valid.de", "/deep/group/dlbootcamp/jirvin16/data/test.de", None)]
for infile_name, outfile_name, target_language in file_triplets:
preprocess(infile_name, outfile_name, target_language)
| [
"[email protected]"
] | |
b636a95f9750de5d956bc9dd6ad4e939c62ff662 | 0a0e787f6041c46a57de3a8aaff63a8044f0edbd | /model_funcs.py | 7e1942229623a54be2b68307497f1909799d80e1 | [
"MIT"
] | permissive | nick-kopy/Modeling-Electric-Rental-Bike-Lock-Ups | 169ce1847290847d31520e492a7a5295c3de3b33 | d6065c0e586e6e72c1b972f161bc370af3f7dec1 | refs/heads/main | 2023-06-24T23:16:59.955391 | 2021-07-22T21:26:51 | 2021-07-22T21:26:51 | 352,773,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,933 | py | # This file contains all the necessary functions for model.ipynb to run
# It mostly collects, cleans, and presents data
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from geopy.distance import geodesic
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.model_selection import train_test_split, KFold
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
def grab_data(url):
'''Grabs data from URL and cleans it up for use in EDA.
Output still needs a touch up before modeling however.
'''
# start with data we want
df = pd.read_csv(url, usecols=['ended_at', 'started_at', 'start_station_id', 'rideable_type',
'end_station_id', 'end_lat', 'end_lng', 'member_casual'])
# drop rows w/o lat/long coordinates
df = df[df['end_lat'].notna()]
# drop non-electric bikes
df = df[df['rideable_type'] == 'electric_bike']
df.reset_index(drop=True)
df = df.drop(columns='rideable_type')
# grab date (for matching up with other data)
df['ended_at'] = pd.to_datetime(df['ended_at'])
df['date'] = pd.to_datetime(df['ended_at'].dt.date)
# add a few time related features
# daylight savings makes a few negative trip times, a quick approximate fix is okay
df['hour'] = df['ended_at'].dt.hour
df['started_at'] = pd.to_datetime(df['started_at'])
df['trip_time'] = abs((df['ended_at'] - df['started_at']).dt.total_seconds())
df = df.drop(columns=['ended_at', 'started_at'])
# binary encoding for a few categorical features
df['start_station_id'] = df['start_station_id'].apply(lambda x: 0 if pd.isna(x) else 1)
df['member_casual'] = df['member_casual'].apply(lambda x: 0 if x=='casual' else 1)
return df
def grab_geo(city):
'''Returns dataframe with lat/long of each docking station in a given city.
'''
# Grab full data from desired city
# Stations do change over time so it's better to look at the full time span
if city == 'SF':
geo1 = pd.read_csv('data/SF/202010-baywheels-tripdata.csv', usecols=['end_station_id', 'end_lat', 'end_lng'])
geo2 = pd.read_csv('data/SF/202011-baywheels-tripdata.csv', usecols=['end_station_id', 'end_lat', 'end_lng'])
elif city == 'CH':
geo1 = pd.read_csv('data/CH/202010-divvy-tripdata.csv', usecols=['end_station_id', 'end_lat', 'end_lng'])
geo2 = pd.read_csv('data/CH/202011-divvy-tripdata.csv', usecols=['end_station_id', 'end_lat', 'end_lng'])
else:
return None
# Reduce to one row per station and associated lat/long
geo = pd.concat([geo1, geo2], ignore_index=True).groupby(by='end_station_id').agg(np.mean)
# Rows without a station name also end up as a row, don't need it
geo = geo[geo['end_lat'].notna()]
return geo
def station_dist(row, input_geo):
'''Wrapper function that returns the distance between an input coordinate set
and a row coordinate set.
Applied to the station coordinate dataframe, can be used to make a new column
of distances from a specific point in space.
'''
# geopy uses lat/long specifically in a tuple to calculate distance
stat_geo = tuple([val for idx, val in row.items()])
return geodesic(stat_geo, input_geo).km
def nearest_station(row, station_geo):
'''Returns the distance to the nearest docking station.
Should be applied to a dataframe and the output will be two new columns.
Arguments:
row: row of a pandas dataframe, typically automatically pass when using df.apply(func)
station_geo: df, output of grab_geo() function
Returns:
tuple: the distance to the nearest docking station in meters (float),
and that station's name (str or int)
'''
# simple progress tracker because this takes a long time
if row.name%2000 == 0:
print(round(row.name/254000, 3))
# this function expects lat/long in a specific column position
# if df is changed beforehand, indexing this variable will mess up
row_vals = [val for idx, val in row.items()]
# if statement to catch rows where bikes are already at a station
if not pd.isna(row_vals[1]):
return 0, row_vals[1]
# get row lat/long
row_geo = tuple(row_vals[2:4])
# get distance to each station
s_geo = station_geo.copy()
s_geo['dist'] = s_geo.apply(station_dist, args=[row_geo], axis=1)
# grab the minimum distance and station name
min_id = s_geo['dist'].idxmin()
min_dist = s_geo.at[min_id, 'dist']*1000
# can modify to only return distance if desired
return min_dist, min_id
def grab_weather(city):
'''Returns dataframe of date, temperature, and windspeed.
Other weather measurements are present but not used.
Precipitation in particular should have been useful, but is
measured at 0 every single day for both cities.
San Jose is approximated to have the same weather as San Francisco.
'''
# Grab full data from desired city
if city == 'CH':
url = 'data/CH/99733899999.csv'
elif city == 'SF':
url = 'data/SF/99401699999.csv'
else:
return None
# only grab the columns we want
dfw = pd.read_csv(url, usecols=['DATE', 'TEMP', 'WDSP'])
# Make our date the datetime datatype for merging later
dfw['DATE'] = pd.to_datetime(dfw['DATE'])
return dfw
def grab_traffic(city):
'''Returns dataframe with traffic measurement.
Data for Chicago and San Francisco are different and not directly comparable.
They do both however measure the volume of people using transportation (traffic),
and are scaled during the modeling process so it shouldn't be an issue.
San Jose is approximated to have the same traffic as San Francisco.
'''
# Grab Chicago's traffic data
if city == 'CH':
dft = pd.read_csv('data/CH/CTA_-_Ridership_-_Daily_Boarding_Totals.csv',
usecols=['service_date', 'total_rides'])
# Make our date the datetime datatype for merging later
dft['service_date'] = pd.to_datetime(dft['service_date'])
return dft
# Grab San Francisco's traffic data which needs a touch more cleaning
elif city == 'SF':
dft = pd.read_csv('data/SF/TaxiTable.csv')
dft['Day of Date'] = pd.to_datetime(dft['Day of Date'])
dft['Number of Records'] = dft['Number of Records'].replace(',', '', regex=True).astype('int32')
dft = dft.rename(columns={'Number of Records':'taxi_trips'})
return dft
else:
return None
def get_city(row):
'''Returns the name of a city for a given row.
'''
# if df is changed beforehand indexing this variable will mess up
row_vals = [val for idx, val in row.items()]
stat = row_vals[12]
# looks at station id to figure out which city a row is from
if type(stat) == float:
return 'CH'
elif stat.find('SF') > -1:
return 'SF'
elif stat.find('SJ') > -1:
return 'SJ'
elif stat.find('San Jose Depot') > -1:
return 'SJ'
else:
return 'unknown'
def rmse(true, predicted):
'''Quick root mean squared error function
'''
mean_squared = mean_squared_error(true, predicted)
return np.sqrt(mean_squared)
def cross_val(X_train, y_train, k):
'''Simple CV loop that returns average rmse across k folds.
Useful in getting a more accurate model training error that's less dependent
on the train-test-split.
'''
rmse_arr = []
kf = KFold(n_splits=k)
# Each loop takes a different fold, calculates the error, and saves it in a list
for train_index, test_index in kf.split(X_train):
# Make within fold test train splits
Kx_train, Kx_test = X_train[train_index], X_train[test_index]
Ky_train, Ky_test = y_train.iloc[train_index], y_train.iloc[test_index]
# Train the model and make a prediction
mod = sm.OLS(Ky_train, Kx_train, hasconst=True).fit()
train_predicted = mod.predict(Kx_test)
# Calculate the error
cur_rmse = rmse(Ky_test, train_predicted)
# Add it to the error list
rmse_arr.append(cur_rmse)
# The average of the error list is a good estimate of the training error
return np.average(rmse_arr)
def undocked_stations(city):
'''Returns a dataframe with aggregates (mean distances and count) for each station
'''
# Grab full data from desired city
if city == 'SF':
df = pd.read_csv('data/sf2.csv', index_col='Unnamed: 0')
elif city == 'CH':
df = pd.read_csv('data/ch1.csv', index_col='Unnamed: 0')
else:
return None
# Only want to assess undocked trips and our target feature
df = df[df['closest_dist'] > 0]
df = df[['closest_id', 'closest_dist']]
# Make each row a station listing it's mean distance and count for rides nearest it
df = df.groupby('closest_id').agg(['mean', 'count']).sort_values(('closest_dist', 'count'), ascending=False)
# Add station lat/long
return df.merge(grab_geo(city), left_index=True, right_index=True)
def graph_distances(city, ax):
'''Function to graph undocked bike distances histogram
recommended figsize=(12,8)
'''
# Grab target data from desired city
if city == 'SF':
y = pd.read_csv('data/sf.csv', usecols=['closest_dist'])
c = 'San Francisco'
elif city == 'CH':
y = pd.read_csv('data/ch.csv', usecols=['closest_dist'])
c = 'Chicago'
else:
return None
# Only looking at distances between 1 - 750
    y = y[y > 0]
    y = y[y < 750].dropna()
# Graph and peripherals
ax.hist(y, bins=60)
ax.set_xlabel('Distance from Nearest Station (m)', fontsize=14)
ax.set_ylabel('Frequency\n(Number of Rides)', fontsize=14)
ax.set_title('Distance from Nearest Station Histogram - {}'.format(c), fontsize=16)
def cross_val(X_train, y_train, k):
'''Basic CV function that works with OLS specifically
Returns average RMSE for training error.
'''
rmse_arr = []
kf = KFold(n_splits=k)
# Each loop adds the rmse for that fold
for train_index, test_index in kf.split(X_train):
# necessary indices from a given fold
Kx_train, Kx_test = X_train[train_index], X_train[test_index]
Ky_train, Ky_test = y_train.iloc[train_index], y_train.iloc[test_index]
# train a new model with this fold
mod = sm.OLS(Ky_train, Kx_train, hasconst=True).fit()
# calculate error based on prediction
train_predicted = mod.predict(Kx_test)
cur_rmse = mean_squared_error(Ky_test, train_predicted)**0.5
# and add to list
rmse_arr.append(cur_rmse)
# return the average of error list
return np.average(rmse_arr) | [
"[email protected]"
] | |
81f04dc9ed83f70169a1af6953c7c4c5a7a0aa80 | a0e3266fb10b9c4a94ee817763d7e55520ceb8f2 | /microservices/svc/divider/handler/__init__.py | a61333d61ea10d294469a67f5ebc749f82ca6f2d | [
"MIT"
] | permissive | sato-mh/distributed-calculator | d90cc15c9fb37d887ad0376e56fbc9de0323fe53 | 8d044084a0f70effe5264f3a726962e3ac8da7f5 | refs/heads/master | 2023-07-25T00:54:56.207918 | 2021-09-06T06:25:50 | 2021-09-06T06:25:50 | 402,721,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | from .divider import Divider # noqa: F401
| [
"[email protected]"
] | |
cda7ba7a617b163349b84eaacc3643c872f9ecb4 | cc10b80f143684270a7f4db7c73ada125309e3fa | /Clone_Graph_133.py | a30b91f3fded5f475207df06119519d02a45d648 | [] | no_license | jay6413682/Leetcode | bfe8bf15183aa824fb96d9ff5a1ead5b7b7fa4d9 | 3ea03cd8b1fa507553ebee4fd765c4cc4b5814b6 | refs/heads/master | 2023-03-04T19:43:31.477643 | 2023-03-02T06:34:50 | 2023-03-02T06:34:50 | 30,943,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,067 | py | # Definition for a Node.
# graph
# graph theory
class Node:
    def __init__(self, val = 0, neighbors = None):
        self.val = val
        self.neighbors = neighbors if neighbors is not None else []
class Solution:
    """ Depth first search / dfs - https://leetcode-cn.com/problems/clone-graph/solution/ke-long-tu-by-leetcode-solution/
    Complexity analysis
    Time complexity: O(N), where N is the number of nodes. Depth-first search visits each node exactly once.
    Space complexity: O(N). The hash table mapping original nodes to their clones takes O(N) space,
    and the recursion stack takes O(H) space, where H is the depth of the graph; since O(H) = O(N)
    in the worst case, the overall space complexity is O(N).
    """
    def __init__(self):
        self.visited = {}
    def cloneGraph(self, node: 'Node') -> 'Node':
        # If this node has already been visited, return its clone from the hash table directly
        if node in self.visited:
            return self.visited[node]
        if not node:
            return node
        # Clone the node; note that for a deep copy we do not clone its neighbor list yet.
        # The clone must be stored in the hash table first, so recursive calls can return it
        # instead of looping infinitely on cycles.
        self.visited[node] = Node(node.val)
        # Traverse this node's neighbors and update the clone's neighbor list
        for neighbor in node.neighbors:
            cloned_neighbor = self.cloneGraph(neighbor)
            self.visited[node].neighbors.append(cloned_neighbor)
        return self.visited[node]
class Solution2:
    def cloneGraph(self, node: 'Node') -> 'Node':
        """
        https://leetcode-cn.com/problems/clone-graph/solution/ke-long-tu-by-leetcode-solution/
        Complexity analysis
        Time complexity: O(N), where N is the number of nodes. Breadth-first search visits each node exactly once.
        Space complexity: O(N). The hash table uses O(N) space, and the breadth-first search queue can hold
        up to O(N) nodes in the worst case, so the overall space complexity is O(N).
        """
        if not node:
            return node
        # Clone the first node and store it in the hash table
        visited = {node: Node(node.val)}
        # Seed the queue with the node given in the problem
        nodes = [node]
        # Breadth-first search
        while nodes:
            # Pop the head node of the queue
            curr = nodes.pop(0)
            # Traverse this node's neighbors
            for neighbor in curr.neighbors:
                if neighbor not in visited:
                    # If the neighbor has not been visited yet, clone it and store it in the hash table
                    visited[neighbor] = Node(neighbor.val)
                    # Add the neighbor node to the queue
                    nodes.append(neighbor)
                # Update the current clone node's neighbor list
                visited[curr].neighbors.append(visited[neighbor])
        return visited[node]
| [
"[email protected]"
] | |
1466c5381a233e72613b78028a7e34537d4795e1 | b0ba7f0810460661846ee179c3e1ee24e9ba19c2 | /dailyfresh/dailyfresh/urls.py | b1707d910e8ffba47d96470b9544fd25a73e7e54 | [] | no_license | kaoven/dailyfresh | 6c47e384d035612be8f309861248e7165cf6a6cb | 0b9425f573ea030bbd3505ca6a75ad40bb996d83 | refs/heads/master | 2021-05-09T21:38:54.964315 | 2018-01-24T07:57:31 | 2018-01-24T07:57:31 | 118,731,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | """dailyfresh URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^tinymce/', include('tinymce.urls')), # ๅฏๆๆฌ็ผ่พๅจ
url(r'^search', include('haystack.urls')), # ๅ
จๆๆฃ็ดข
url(r'^user/', include('apps.user.urls', namespace='user')), # ็จๆทๆจกๅ
url(r'^cart/', include('apps.cart.urls', namespace='cart')), # ่ดญ็ฉ่ฝฆๆจกๅ
url(r'^order/', include('apps.order.urls', namespace='order')), # ่ฎขๅๆจกๅ
url(r'^', include('apps.goods.urls', namespace='goods')), # ๅๅๆจกๅ
]
| [
"[email protected]"
] | |
3ce171e90e7e5da03b709cdd437fabc15e85d450 | 0d8486c1d55c40bebea7c5428930f18165d2d0e9 | /tests/wasp1/AllAnswerSets/grounding_ordering_7.test.py | 8e6844bc87c31d8550bcba0188c68ca865f9af30 | [
"Apache-2.0"
] | permissive | bernardocuteri/wasp | 6f81bf6aa8fb273c91bbf68ecce4ecb195a55953 | 05c8f961776dbdbf7afbf905ee00fc262eba51ad | refs/heads/master | 2021-06-08T11:58:25.080818 | 2020-10-05T16:57:37 | 2020-10-05T16:57:37 | 124,245,808 | 0 | 0 | Apache-2.0 | 2018-03-07T14:13:16 | 2018-03-07T14:13:16 | null | UTF-8 | Python | false | false | 75 | py | input = """
#maxint=0.
f(Z):- Z=Y+0, #int(Y).
"""
output = """
{f(0)}
"""
| [
"[email protected]"
] | |
bc986847479e746b487e2e67ecbc112c6ac46db4 | eb446fc63e6cb700059a9b388b6e48556d46d892 | /assignment1/cs231n/classifiers/k_nearest_neighbor.py | 80143906f2cad87ed3c5c7570bc06dcf7b4af353 | [] | no_license | Libardo1/cs231n | 575360963ab0bfa376c0d41a0f75204fb6d80628 | 885c83e049d1bf1b77a60d6f46c496789a2f1d61 | refs/heads/master | 2020-05-28T02:41:10.986705 | 2017-01-23T00:02:38 | 2017-01-23T00:02:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,475 | py | import numpy as np
class KNearestNeighbor(object):
""" a kNN classifier with L2 distance """
def __init__(self):
pass
def train(self, X, y):
"""
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Inputs:
- X: A numpy array of shape (num_train, D) containing the training data
consisting of num_train samples each of dimension D.
- y: A numpy array of shape (N,) containing the training labels, where
y[i] is the label for X[i].
"""
self.X_train = X
self.y_train = y
def predict(self, X, k=1, num_loops=0):
"""
Predict labels for test data using this classifier.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data consisting
of num_test samples each of dimension D.
- k: The number of nearest neighbors that vote for the predicted labels.
- num_loops: Determines which implementation to use to compute distances
between training points and testing points.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
elif num_loops == 2:
dists = self.compute_distances_two_loops(X)
else:
raise ValueError('Invalid value %d for num_loops' % num_loops)
return self.predict_labels(dists, k=k)
def compute_distances_two_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data.
Returns:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
is the Euclidean distance between the ith test point and the jth training
point.
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in xrange(num_test):
for j in xrange(num_train):
#####################################################################
# TODO: #
# Compute the l2 distance between the ith test point and the jth #
# training point, and store the result in dists[i, j]. You should #
# not use a loop over dimension. #
#####################################################################
dists[i, j] = np.sqrt(np.sum(np.square(self.X_train[j,:] - X[i,:])))
#####################################################################
# END OF YOUR CODE #
#####################################################################
return dists
def compute_distances_one_loop(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in xrange(num_test):
#######################################################################
# TODO: #
# Compute the l2 distance between the ith test point and all training #
# points, and store the result in dists[i, :]. #
#######################################################################
dists[i, :] = np.sqrt(np.sum(np.square(self.X_train - X[i,:]), 1))
#######################################################################
# END OF YOUR CODE #
#######################################################################
return dists
def compute_distances_no_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
#########################################################################
# TODO: #
# Compute the l2 distance between all test points and all training #
# points without using any explicit loops, and store the result in #
# dists. #
# #
# You should implement this function using only basic array operations; #
# in particular you should not use functions from scipy. #
# #
# HINT: Try to formulate the l2 distance using matrix multiplication #
# and two broadcast sums. #
#########################################################################
dists = -2*np.dot(X,self.X_train.T) + \
np.tile(np.reshape(np.sum(np.square(X), 1), (num_test, 1)), (1, num_train)) + \
np.tile(np.reshape(np.sum(np.square(self.X_train), 1), (num_train, 1)), (1, num_test)).T
dists = np.sqrt(dists)
#########################################################################
# END OF YOUR CODE #
#########################################################################
return dists
def predict_labels(self, dists, k=1):
"""
Given a matrix of distances between test points and training points,
predict a label for each test point.
Inputs:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
gives the distance betwen the ith test point and the jth training point.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
    for i in range(num_test):
# A list of length k storing the labels of the k nearest neighbors to
# the ith test point.
closest_y = []
#########################################################################
# TODO: #
# Use the distance matrix to find the k nearest neighbors of the ith #
# testing point, and use self.y_train to find the labels of these #
# neighbors. Store these labels in closest_y. #
# Hint: Look up the function numpy.argsort. #
#########################################################################
inds = np.argsort(dists[i,:])
closest_y = self.y_train[inds[:k]]
#########################################################################
# TODO: #
# Now that you have found the labels of the k nearest neighbors, you #
# need to find the most common label in the list closest_y of labels. #
# Store this label in y_pred[i]. Break ties by choosing the smaller #
# label. #
#########################################################################
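      # Run-length scan over the sorted labels; using '>' (not '>=') means that
      # on a tie the earlier, i.e. smaller, label is kept, as required above.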
closest_y.sort()
maxn = 1
besty = closest_y[0]
count = 1
for j in range(1, k):
if closest_y[j] == closest_y[j-1]:
count += 1
else:
if count > maxn:
besty = closest_y[j-1]
maxn = count
count = 1
if count > maxn:
besty = closest_y[-1]
y_pred[i] = besty
#########################################################################
# END OF YOUR CODE #
#########################################################################
return y_pred
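# --- Added sketch (not part of the assignment code): a quick numpy check of
# the identity behind compute_distances_no_loops,
#   ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x.y
# Array names below are illustrative only.
if __name__ == '__main__':
  _X = np.random.randn(4, 3)
  _Xtr = np.random.randn(5, 3)
  _d2 = (-2 * _X.dot(_Xtr.T)
         + np.sum(np.square(_X), 1, keepdims=True)
         + np.sum(np.square(_Xtr), 1))
  _ref = np.array([[np.sum((_X[i] - _Xtr[j]) ** 2) for j in range(5)]
                   for i in range(4)])
  assert np.allclose(_d2, _ref)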
| [
"[email protected]"
] | |
42668eb3cec8a2407f7d7c9d973957dc2e908b51 | 3171e9f9c3312e617aad8e984410d931c368f3a4 | /web_socket_server/node_modules/ws/build/config.gypi | 43bf8528ef09587c308e1e3f21a5b81501c4f91a | [
"MIT"
] | permissive | jjojo/jsmoker | b08e1793a9eef089f95893d563e0253987e6a487 | 065f7a1aeb303082e0d5f7d771b3764947891446 | refs/heads/master | 2021-01-25T05:10:20.715928 | 2017-09-30T10:18:47 | 2017-09-30T10:18:47 | 93,518,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,992 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"host_arch": "x64",
"icu_data_file": "icudt56l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt56l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "56",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/Jonas/.node-gyp/4.6.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/Users/Jonas/.nvm/versions/node/v4.6.0/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/Users/Jonas/.nvm/versions/node/v4.6.0/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/Jonas/.npm-init.js",
"userconfig": "/Users/Jonas/.npmrc",
"node_version": "4.6.0",
"user": "501",
"save": "true",
"editor": "vi",
"tag": "latest",
"global": "",
"progress": "true",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/Jonas/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/3.10.8 node/v4.6.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/7_/j6zw8_bj42qdf7zd82_zncdh0000gn/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/Users/Jonas/.nvm/versions/node/v4.6.0"
}
}
| [
"[email protected]"
] | |
a64c504346d22024d2985e910aa57b068028ce5f | 8d00a56744d81106917e0d4805545fcde2234ca5 | /.ycm_extra_conf.py | d444cf5a433288a169de9f2b4855b2008f57a1f4 | [] | no_license | wqx081/mpr_media | 9bd913d60d060d4a886a89ce2824de285445aa24 | 065e80c340404448f49d16ee56e0d5fdcde361bf | refs/heads/master | 2020-04-06T07:10:18.608930 | 2016-08-30T12:36:56 | 2016-08-30T12:36:56 | 65,811,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,401 | py | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
#'-Wc++98-compat',
'-Wno-long-long',
'-Wno-deprecated-declarations',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./ClangCompleter',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
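# --- Added sketch (not part of the original config): YouCompleteMe imports
# this module and calls FlagsForFile itself; the guarded call below just shows
# the shape of the returned dict. The file path is illustrative.
if __name__ == '__main__':
  import pprint
  pprint.pprint( FlagsForFile( '/tmp/example.cc' ) )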
| [
"[email protected]"
] | |
e01a89ed525c71b84c344fd6d92743fe91d4cad2 | 3e6e8c70187058bef49c2270b66ce32e0acb77e8 | /features/steps/register.py | 1cd04a2c0e113d8316a4fc8a0cdc132c6d480422 | [] | no_license | rizkimp/register_automated_testing | e23e91d893d6faca8baf5a37eb2cbb859e3cd3ae | 6ee5dd64c3e64c457a046e3a0b735e2817c9027b | refs/heads/master | 2021-05-25T08:34:32.902077 | 2020-04-07T09:11:10 | 2020-04-07T09:11:10 | 253,742,817 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,962 | py | import strgen #library to generate random string
from selenium import webdriver
from behave import *
from locators import *
from selenium.webdriver.common.by import By
from time import sleep
@given(u'prepare to register')
def step_impl(context):
context.browser.implicitly_wait(30)
context.browser.find_element(By.XPATH,locator.navbar)
context.browser.implicitly_wait(30)
context.browser.find_element(By.XPATH,locator.slide)
@when(u'input valid data')
def step_impl(context):
context.browser.implicitly_wait(10)
context.browser.find_element(By.XPATH,locator.button_register1).click()
sleep(1)
context.browser.implicitly_wait(10)
context.browser.find_element(By.XPATH,locator.form_register)
sleep(1)
#generate random string for uniq username and email
username = strgen.StringGenerator("[\w\d]{5}").render()
email = "@email.automation"
context.browser.implicitly_wait(10)
context.browser.find_element(By.XPATH,locator.input_email).send_keys("%s%s" % (username,email))
sleep(1)
context.browser.implicitly_wait(10)
context.browser.find_element(By.XPATH,locator.input_username).send_keys("%s" % (username))
sleep(1)
context.browser.implicitly_wait(10)
context.browser.find_element(By.XPATH,locator.input_password1).send_keys("12345678wasd")
sleep(1)
context.browser.implicitly_wait(10)
context.browser.find_element(By.XPATH,locator.input_password2).send_keys("12345678wasd")
sleep(1)
context.browser.implicitly_wait(10)
context.browser.find_element(By.XPATH,locator.button_register2).click()
sleep(3)
@then(u'success register')
def step_impl(context):
context.browser.implicitly_wait(10)
context.browser.find_element(By.XPATH,locator.navbar)
context.browser.implicitly_wait(10)
context.browser.find_element(By.XPATH,locator.avatar_plain)
context.browser.implicitly_wait(10)
context.browser.find_element(By.XPATH,locator.slide)
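# --- Added sketch (assumption): behave supplies `context.browser` from a
# separate features/environment.py, which is not part of this file. A minimal
# version of those hooks would look like this:
#
# def before_all(context):
#     context.browser = webdriver.Chrome()
#
# def after_all(context):
#     context.browser.quit()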
| [
"[email protected]"
] | |
01019e95e6592c02760b57f473812ecce1c11855 | 952b3b0a91de7c561712ac14447ae87143d9e033 | /topo_script.py | 3e7f04333e46cf043742112be55aea11b8d516ce | [] | no_license | mmarchini/tp-protocolos | ad6f8cbbd280bb80443a4c308f2270ee9772a042 | 28a92c46b60ff4912fd7369aac8bf53155a31365 | refs/heads/master | 2020-04-14T02:31:35.297582 | 2015-06-26T03:27:31 | 2015-06-26T03:27:31 | 33,481,593 | 0 | 0 | null | 2015-06-26T03:27:31 | 2015-04-06T12:47:22 | Python | UTF-8 | Python | false | false | 1,953 | py | #!/usr/bin/python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.util import dumpNodeConnections
from mininet.log import setLogLevel
class EvalTopo (Topo):
"Evaluation Topo for Green Network"
def __init__( self ):
Topo.__init__( self )
# Adding switches and nodes (one node to each switch)
s1 = self.addSwitch('s1')
h1 = self.addHost('h1', ip='10.0.0.1', mac='00:04:00:00:00:01')
s2 = self.addSwitch('s2')
h2 = self.addHost('h2', ip='10.0.0.2', mac='00:04:00:00:00:02')
s3 = self.addSwitch('s3')
h3 = self.addHost('h3', ip='10.0.0.3', mac='00:04:00:00:00:03')
s4 = self.addSwitch('s4')
h4 = self.addHost('h4', ip='10.0.0.4', mac='00:04:00:00:00:04')
s5 = self.addSwitch('s5')
h5 = self.addHost('h5', ip='10.0.0.5', mac='00:04:00:00:00:05')
s6 = self.addSwitch('s6')
h6 = self.addHost('h6', ip='10.0.0.6', mac='00:04:00:00:00:06')
s7 = self.addSwitch('s7')
h7 = self.addHost('h7', ip='10.0.0.7', mac='00:04:00:00:00:07')
s8 = self.addSwitch('s8')
s9 = self.addSwitch('s9')
s10 = self.addSwitch('s10')
# Creating links between hosts and switches
self.addLink(h1, s1)
self.addLink(h2, s2)
self.addLink(h3, s3)
self.addLink(h4, s4)
self.addLink(h5, s5)
self.addLink(h6, s6)
self.addLink(h7, s7)
#Creating links between switches
self.addLink(s1, s2)
self.addLink(s1, s7)
self.addLink(s1, s8)
self.addLink(s1, s10)
self.addLink(s2, s10)
self.addLink(s2, s3)
self.addLink(s3, s9)
self.addLink(s3, s4)
self.addLink(s4, s5)
self.addLink(s4, s8)
self.addLink(s5, s6)
self.addLink(s5, s7)
self.addLink(s6, s7)
self.addLink(s7, s8)
self.addLink(s8, s9)
self.addLink(s8, s10)
self.addLink(s9, s10)
topos = {'evaltopo' : (lambda: EvalTopo() ) }
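# --- Added sketch (not in the original script): a minimal direct runner using
# the imports above. The same topology can also be loaded through the mn CLI:
#   sudo mn --custom topo_script.py --topo evaltopo
def run():
    "Build the evaluation topology, dump host connections and ping all hosts."
    net = Mininet( topo=EvalTopo() )
    net.start()
    dumpNodeConnections( net.hosts )
    net.pingAll()
    net.stop()

if __name__ == '__main__':
    setLogLevel( 'info' )
    run()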
| [
"[email protected]"
] | |
275930b8747f46d9c54103de1c83df74ae486c27 | 25b914aecd6b0cb49294fdc4f2efcfdf5803cc36 | /homeassistant/components/keenetic_ndms2/config_flow.py | a4ef406dd92f6b84fb23b067002c341c4384e263 | [
"Apache-2.0"
] | permissive | jason0x43/home-assistant | 9114decaa8f7c2f1582f84e79dc06736b402b008 | 8bf6aba1cf44ee841de063755c935ea78040f399 | refs/heads/dev | 2023-03-04T01:14:10.257593 | 2022-01-01T12:11:56 | 2022-01-01T12:11:56 | 230,622,861 | 1 | 1 | Apache-2.0 | 2023-02-22T06:15:07 | 2019-12-28T14:45:43 | Python | UTF-8 | Python | false | false | 6,847 | py | """Config flow for Keenetic NDMS2."""
from __future__ import annotations
from typing import Any
from urllib.parse import urlparse
from ndms2_client import Client, ConnectionException, InterfaceInfo, TelnetConnection
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .const import (
CONF_CONSIDER_HOME,
CONF_INCLUDE_ARP,
CONF_INCLUDE_ASSOCIATED,
CONF_INTERFACES,
CONF_TRY_HOTSPOT,
DEFAULT_CONSIDER_HOME,
DEFAULT_INTERFACE,
DEFAULT_SCAN_INTERVAL,
DEFAULT_TELNET_PORT,
DOMAIN,
ROUTER,
)
from .router import KeeneticRouter
class KeeneticFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(
config_entry: ConfigEntry,
) -> KeeneticOptionsFlowHandler:
"""Get the options flow for this handler."""
return KeeneticOptionsFlowHandler(config_entry)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
host = self.context.get(CONF_HOST) or user_input[CONF_HOST]
self._async_abort_entries_match({CONF_HOST: host})
_client = Client(
TelnetConnection(
host,
user_input[CONF_PORT],
user_input[CONF_USERNAME],
user_input[CONF_PASSWORD],
timeout=10,
)
)
try:
router_info = await self.hass.async_add_executor_job(
_client.get_router_info
)
except ConnectionException:
errors["base"] = "cannot_connect"
else:
return self.async_create_entry(
title=router_info.name, data={CONF_HOST: host, **user_input}
)
host_schema = (
{vol.Required(CONF_HOST): str} if CONF_HOST not in self.context else {}
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
**host_schema,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Optional(CONF_PORT, default=DEFAULT_TELNET_PORT): int,
}
),
errors=errors,
)
async def async_step_import(
self, user_input: ConfigType | None = None
) -> FlowResult:
"""Import a config entry."""
return await self.async_step_user(user_input)
async def async_step_ssdp(self, discovery_info: ssdp.SsdpServiceInfo) -> FlowResult:
"""Handle a discovered device."""
friendly_name = discovery_info.upnp.get(ssdp.ATTR_UPNP_FRIENDLY_NAME, "")
# Filter out items not having "keenetic" in their name
if "keenetic" not in friendly_name.lower():
return self.async_abort(reason="not_keenetic_ndms2")
# Filters out items having no/empty UDN
if not discovery_info.upnp.get(ssdp.ATTR_UPNP_UDN):
return self.async_abort(reason="no_udn")
host = urlparse(discovery_info.ssdp_location).hostname
await self.async_set_unique_id(discovery_info.upnp[ssdp.ATTR_UPNP_UDN])
self._abort_if_unique_id_configured(updates={CONF_HOST: host})
self._async_abort_entries_match({CONF_HOST: host})
self.context[CONF_HOST] = host
self.context["title_placeholders"] = {
"name": friendly_name,
"host": host,
}
return await self.async_step_user()
class KeeneticOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle options."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Initialize options flow."""
self.config_entry = config_entry
self._interface_options = {}
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options."""
router: KeeneticRouter = self.hass.data[DOMAIN][self.config_entry.entry_id][
ROUTER
]
interfaces: list[InterfaceInfo] = await self.hass.async_add_executor_job(
router.client.get_interfaces
)
self._interface_options = {
interface.name: (interface.description or interface.name)
for interface in interfaces
if interface.type.lower() == "bridge"
}
return await self.async_step_user()
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the device tracker options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
options = vol.Schema(
{
vol.Required(
CONF_SCAN_INTERVAL,
default=self.config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
): int,
vol.Required(
CONF_CONSIDER_HOME,
default=self.config_entry.options.get(
CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME
),
): int,
vol.Required(
CONF_INTERFACES,
default=self.config_entry.options.get(
CONF_INTERFACES, [DEFAULT_INTERFACE]
),
): cv.multi_select(self._interface_options),
vol.Optional(
CONF_TRY_HOTSPOT,
default=self.config_entry.options.get(CONF_TRY_HOTSPOT, True),
): bool,
vol.Optional(
CONF_INCLUDE_ARP,
default=self.config_entry.options.get(CONF_INCLUDE_ARP, True),
): bool,
vol.Optional(
CONF_INCLUDE_ASSOCIATED,
default=self.config_entry.options.get(
CONF_INCLUDE_ASSOCIATED, True
),
): bool,
}
)
return self.async_show_form(step_id="user", data_schema=options)
| [
"[email protected]"
] | |
87d27e5efe16f8ce13fbba0b38498b6201761947 | 6a88140f644ee79ee76edca2f64d75f4ad6dc772 | /New/Function/lrtmmse.py | 8c24df455c7519ca2756882fd6795ddc4eaeda6b | [] | no_license | KennethBenicio/BSc-LRT-Filter-Design | bbca2ac3e6c2bb936c40ddbc72fb90d62a6e36bc | 0e536beb5a5ef4a3a5cd6896f2de32b16ebb8ddd | refs/heads/master | 2022-11-29T23:16:00.201345 | 2020-08-19T10:37:30 | 2020-08-19T10:37:30 | 288,703,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,450 | py | import numpy as np
import tensoralgebraezequias as tensoralg
def lrt_mmse(mt_x, train_seq, ndims, order, rank, eps, num_iter):
# Auxiliary function for linear filtering(Eq.11 of the thesis)
def mmse_filter(mt_list, rank, order):
w = 0
for r in range(rank):
columns = [mt_list[d][:, [r]] for d in range(order)]
w += tensoralg.kron(*columns[::-1])
return w
n, samples = mt_x.shape
modes = np.arange(order) # j modes list
mt_w = [None] * order # matrices w_{d}
# initializing w_{d,r}:
for d in range(order): # w_{d} = [w_{d,r} ... ]
mt_w[d] = np.zeros((ndims[d], rank), dtype=complex)
# initializing w_{d, r} = [1 0 0 ...]
mt_w[d][0, :] = np.random.rand(rank)
# Construction of w_0: vec(W)
w_aux = mmse_filter(mt_w, rank, order)
# Storing errors:
errors = np.zeros(num_iter)
# Reshaping X as the Tensor:
dim = order + 1
shape = ndims + [samples]
ten_x = np.reshape(mt_x, shape, order='F').T.swapaxes(dim - 2, dim - 1)
# Filtering algorithm:
for i in range(num_iter):
for d in range(order):
mt_u_dr = [None] * rank
for r in range(rank):
# select j modes != d and w_{j,r} columns of w_{d}
mask = np.ones(order, dtype=bool)
mask[d] = False
# hermitian w_{j,r}
w_jr = [mt_w[j][:, [r]].conj().T
for j in range(order) if mask[j]]
# Build U_{d, r}
u_dr = tensoralg.m_mode_prod(ten_x, w_jr, modes[mask]).reshape(samples, ndims[d]).T
# transpose U_{d, r}
mt_u_dr[r] = u_dr.T
# Forming U_{d}:
            mt_u_d = np.hstack(mt_u_dr).T
# Covariances:
mt_cov = (1 / samples) * mt_u_d @ mt_u_d.conj().T
vt_cov = (1 / samples) * mt_u_d @ train_seq.conj()
# Update filter stacked R w_{d, r} columns as RN_{d} x 1:
w_d_mmse = np.linalg.inv(mt_cov) @ vt_cov
# Update w_{d}:
mt_w[d] = tensoralg.unvec(w_d_mmse, ndims[d], rank)
# Constructing w_i:
vt_w = mmse_filter(mt_w, rank, order)
# Error and convergence:
errors[i] = np.linalg.norm(vt_w - w_aux) ** 2
if errors[i] <= eps:
break
else:
w_aux = vt_w
    return vt_w, mt_w, ten_x, errors, i
| [
"[email protected]"
] | |
a7ea21bd915a48e135969d4d8566ce726b7d0f41 | 84bf86fac2a0e615381a2f3d515d452a3011c2c4 | /Scripts/Generate-Benchmark/process.eqtl.py | 457c14fc4c03f88c96117b663a2c4d13192e4596 | [] | no_license | Jill-Moore/Target-Gene-Prediction | 2a126dcfdeab5b8ab5683d7dfc17205afbddbf77 | 6f4fa6a6d7a4b5b9f8e8dda8e12dcaeb4bfa84c6 | refs/heads/master | 2022-09-10T13:16:04.522233 | 2020-05-29T20:11:39 | 2020-05-29T20:11:39 | 109,743,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | import sys, subprocess
def Create_Bed_Files(tsv, enhancer, tss):
bedDict={}
bed1=open("bed1","w+")
i=1
    next(tsv)
for line in tsv:
line=line.rstrip().split("\t")
if "_" in line[0]:
x=line[0].split("_")
            print("chr"+x[0]+"\t"+str(int(x[1])-1)+"\t"+x[1]+"\t"+line[1].split(".")[0], file=bed1)
i += 1
bed1.close()
out1=open("out1","w+")
subprocess.call(["bedtools", "intersect", "-wo","-a", "bed1", "-b", enhancer], stdout=out1)
out1.close()
out1=open("out1")
for line in out1:
line=line.rstrip().split("\t")
if line[3] not in bedDict:
bedDict[line[3]]=[line[7]]
else:
if line[7] not in bedDict[line[3]]:
bedDict[line[3]].append(line[7])
out1.close()
return bedDict
def Create_Gene_Dict(tss):
geneDict={}
tss=open(tss)
for line in tss:
line=line.rstrip().split("\t")
master=line[6].split(".")[0]
if master not in geneDict:
geneDict[master]=line[6]
tss.close()
return geneDict
tsv=open(sys.argv[1])
enhancer=sys.argv[2]
tss=sys.argv[3]
bedDict = Create_Bed_Files(tsv, enhancer, tss)
geneDict=Create_Gene_Dict(tss)
tsv.close()
n=1
for gene in bedDict:
try:
g=geneDict[gene]
for els in bedDict[gene]:
            print(els+"\t"+g+"\t"+"Link-"+str(n))
n+=1
    except KeyError:  # gene has no TSS entry
pass
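# --- Added usage sketch (file names are illustrative):
#   python process.eqtl.py eqtls.tsv enhancers.bed tss.bed > links.txt
# Column 1 of the TSV holds variant IDs like "1_12345_..." (chrom_position),
# column 2 the versioned gene ID; column 7 of the TSS file carries the full
# gene ID, matched on its unversioned prefix.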
| [
"[email protected]"
] | |
c9536efbd494fd2e1864bdd6903e6cb586bf2af3 | 961490d5ed1cc536d4d4b88c3da5222c9002dc2f | /BlogFrame/urls.py | 47f5293b1c0fef56f64146b5f893e132195a1c29 | [] | no_license | RonilM/BlogFrame | 4806d1561aacac77cdb9762a46dae3e322c2f8a3 | 29c0d502dcdb47c58abefbdc9e6240867a4425e1 | refs/heads/master | 2021-01-11T21:28:24.604064 | 2017-01-18T18:19:29 | 2017-01-18T18:19:29 | 78,790,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | """BlogFrame URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include,url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^blog/', include('blog.urls'))
]
| [
"[email protected]"
] | |
468f4f7c9a28f40036e7b4041c1c203c309f2f74 | 9d2b8e5228e735bac0d9cd1d4bbdf501d1d57190 | /src/loss_and_metrics.py | 7756ac13b1b6760a7efc01b383994e7e3adf94a7 | [] | no_license | ilijagjorgjiev/project_2_ml | 48b698a44be69b9de19748b138ec80ace6d63f21 | 59ab44fc432a613896a0081ca5ef8b35e9c3bb14 | refs/heads/master | 2020-04-07T20:46:05.291056 | 2019-02-04T10:48:03 | 2019-02-04T10:48:03 | 158,702,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | from keras import backend as K
import tensorflow as tf
# Custom F1 metric (predictions are rounded to 0/1) so training progress can be monitored on F1 directly.
def f1(y_true, y_pred):
y_pred = K.round(y_pred)
tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)
tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)
fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)
fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)
p = tp / (tp + fp + K.epsilon())
r = tp / (tp + fn + K.epsilon())
f1 = 2*p*r / (p+r+K.epsilon())
f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)
return K.mean(f1)
#We create our own loss function, the f1_loss which will optimize to get a better f1_score actually being linear
def f1_loss(y_true, y_pred):
tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)
tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)
fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)
fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)
p = tp / (tp + fp + K.epsilon())
r = tp / (tp + fn + K.epsilon())
f1 = 2*p*r / (p+r+K.epsilon())
f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)
return 1 - K.mean(f1)
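# --- Added usage sketch (the Keras model itself is defined elsewhere):
#   model.compile(optimizer='adam', loss=f1_loss, metrics=[f1])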
| [
"[email protected]"
] | |
e1f4eac830ed128ea91bc98e024ae2d19026c306 | 5ddfcf026488400e1bc466355ccb84fc172890e1 | /sql_queries.py | 121607fd59523bcd15efb72a07e2e99282bce464 | [] | no_license | ljia-ch/ETL-S3-Redshift | 48630662df9be7cc102acb7852ac52fc17fa1752 | 814847c2416a0822ff9c4881138c295c3cf1e340 | refs/heads/main | 2023-03-11T08:45:40.319557 | 2021-02-28T18:27:29 | 2021-02-28T18:27:29 | 340,058,250 | 0 | 0 | null | 2021-02-28T18:27:29 | 2021-02-18T13:27:26 | Python | UTF-8 | Python | false | false | 7,695 | py | import configparser
# CONFIG
config = configparser.ConfigParser()
config.read('dwh.cfg')
ARN = config.get('IAM_ROLE', 'ARN')
LOG_DATA = config.get('S3', 'LOG_DATA')
LOG_JSONPATH = config.get('S3', 'LOG_JSONPATH')
SONG_DATA = config.get('S3', 'SONG_DATA')
# DROP TABLES
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs"
songplay_table_drop = "DROP TABLE IF EXISTS songplays"
user_table_drop = "DROP TABLE IF EXISTS users"
song_table_drop = "DROP TABLE IF EXISTS songs"
artist_table_drop = "DROP TABLE IF EXISTS artists"
time_table_drop = "DROP TABLE IF EXISTS time"
# CREATE TABLES
# STAGING TABLES (READ DATA FROM FILES ON S3 AND STOREd IN TABLES)
staging_events_table_create= ("""
CREATE TABLE staging_events (
artist VARCHAR,
auth VARCHAR,
firstName VARCHAR,
gender CHAR,
itemInSession VARCHAR,
lastName VARCHAR,
length FLOAT,
level VARCHAR,
location VARCHAR,
method VARCHAR,
page VARCHAR,
registration FLOAT,
sessionId INT,
song VARCHAR,
status INT,
ts BIGINT,
userAgent VARCHAR,
userId VARCHAR
);
""")
staging_songs_table_create = ("""
CREATE TABLE staging_songs (
num_songs INT,
artist_id VARCHAR,
artist_latitude FLOAT,
artist_longitude FLOAT,
artist_location VARCHAR,
artist_name VARCHAR,
song_id VARCHAR,
title VARCHAR,
duration FLOAT,
year INT
);
""")
# Fact Table
# songplays - records in log data associated with song plays i.e. records with page NextSong
# songplay_id, start_time, user_id, level, song_id, artist_id, session_id, location, user_agent
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays (
songplay_id INT IDENTITY(0,1),
        start_time TIMESTAMP NOT NULL REFERENCES time (start_time),
user_id VARCHAR NOT NULL REFERENCES users (user_id),
level VARCHAR,
song_id VARCHAR NOT NULL REFERENCES songs (song_id) distkey,
artist_id VARCHAR NOT NULL REFERENCES artists (artist_id),
session_id INT,
location VARCHAR,
user_agent VARCHAR,
PRIMARY KEY (songplay_id),
CONSTRAINT time_user_song_artist_key
UNIQUE (start_time, user_id, song_id, artist_id)
)
sortkey (start_time, user_id, song_id, artist_id);
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users (
user_id VARCHAR sortkey,
first_name VARCHAR,
last_name VARCHAR,
gender CHAR,
level VARCHAR,
PRIMARY KEY (user_id)
);
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs (
song_id VARCHAR sortkey,
title VARCHAR,
artist_id VARCHAR NOT NULL REFERENCES artists (artist_id),
year INT,
duration FLOAT,
PRIMARY KEY (song_id)
);
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists (
artist_id VARCHAR sortkey,
name VARCHAR,
location VARCHAR,
latitude FLOAT,
longitude FLOAT,
PRIMARY KEY (artist_id)
);
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time (
        start_time TIMESTAMP sortkey,
hour INT,
day INT,
week INT,
month INT,
year INT,
weekday VARCHAR,
PRIMARY KEY (start_time)
);
""")
# STAGING TABLES
# use JSON PATH define all columns in right order
staging_events_copy = ("""
copy staging_events
from {}
iam_role '{}'
json {}
region 'us-west-2';
""").format(LOG_DATA, ARN, LOG_JSONPATH)
staging_songs_copy = ("""
copy staging_songs
from {}
iam_role '{}'
region 'us-west-2'
json 'auto';
""").format(SONG_DATA, ARN)
# FINAL TABLES
songplay_table_insert = ("""
INSERT INTO songplays (
start_time,
user_id,
level,
song_id,
artist_id,
session_id,
location,
user_agent
)
SELECT DISTINCT (TIMESTAMP 'epoch' + ts/1000 * INTERVAL '1 second') AS start_time, userId as user_id, level, song_id, artist_id, sessionId as session_id, location, userAgent as user_agent
FROM staging_events se
JOIN staging_songs ss ON se.artist = ss.artist_name and se.length = ss.duration and se.song = ss.title
WHERE se.page = 'NextSong'
""")
user_table_insert = ("""
INSERT INTO users (
user_id,
first_name,
last_name,
gender,
level
)
SELECT DISTINCT userId, firstName, lastName, gender, level
FROM staging_events
WHERE page = 'NextSong'
""")
# GROUP BY (rather than SELECT DISTINCT) is used below to de-duplicate the staging rows.
song_table_insert = ("""
INSERT INTO songs (
song_id,
title,
artist_id ,
year,
duration
)
SELECT song_id, title, artist_id, year, duration
FROM staging_songs
GROUP BY song_id, title, artist_id, year, duration
""")
# GROUP BY (rather than SELECT DISTINCT) is used below to de-duplicate the staging rows.
artist_table_insert = ("""
INSERT INTO artists (
artist_id,
name,
location,
latitude,
longitude
)
SELECT artist_id, artist_name, artist_location, artist_latitude, artist_longitude
FROM staging_songs
GROUP BY artist_id, artist_name, artist_location, artist_latitude, artist_longitude
""")
time_table_insert = ("""
INSERT INTO time (
start_time,
hour,
day,
week,
month,
year,
weekday
)
SELECT start_time,
EXTRACT(HOUR FROM start_time) AS hour,
EXTRACT(DAY FROM start_time) AS day,
EXTRACT(WEEK FROM start_time) AS week,
EXTRACT(MONTH FROM start_time) AS month,
EXTRACT(YEAR FROM start_time) AS year,
EXTRACT(WEEKDAY FROM start_time) AS weekday
FROM
(
SELECT TIMESTAMP 'epoch' + ts/1000 * INTERVAL '1 second' AS start_time
FROM staging_events
GROUP BY ts
)AS a
GROUP BY start_time, hour, day, week, month, year, weekday
""")
# QUERY LISTS
create_table_queries = [staging_events_table_create, staging_songs_table_create, time_table_create, user_table_create, artist_table_create, song_table_create, songplay_table_create]
# Drop table order: when table has foreign key drop child table first then parent table. That means the following order
drop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
copy_table_queries = [staging_events_copy, staging_songs_copy]
insert_table_queries = [user_table_insert, song_table_insert, artist_table_insert, time_table_insert, songplay_table_insert]
| [
"[email protected]"
] | |
d22269e7a8eb8007ffd135e4db499ad3cadf71b0 | dd716703ed930911103a9f5b52ee26ce60db75e8 | /project_code/networks/u_net.py | e2972e15fd990878ad9b0b4cc4081750ec04cb51 | [] | no_license | scidex/facial-attributes | 0c9c2e6cf00b44699cedd7323d21608d1591d0d7 | eb2aa4faf1a91987a1a782ca18f65447872822e7 | refs/heads/main | 2023-05-31T13:08:44.423992 | 2021-06-16T10:15:32 | 2021-06-16T10:15:32 | 373,831,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | from project_code.networks.congregated_layers import *
import torch.nn as nn
# Define a convolutional neural network
class UNet(nn.Module):
def __init__(self, bilinear):
super(UNet, self).__init__()
self.n_channels = 3
self.bilinear = bilinear
factor = 2 if bilinear else 1
self.inc = DoubleConv(self.n_channels, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256, 512)
self.down4 = Down(512, 1024 // factor)
self.up1 = Up(1024, 512 // factor, bilinear)
self.up2 = Up(512, 256 // factor, bilinear)
self.up3 = Up(256, 128 // factor, bilinear)
self.up4 = Up(128, 64, bilinear)
self.outc = nn.Conv2d(64, self.n_channels, kernel_size=1)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
return x
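# --- Added smoke test (assumes the imported DoubleConv/Down/Up blocks follow
# the common U-Net reference implementation):
if __name__ == '__main__':
    import torch
    _net = UNet(bilinear=True)
    _out = _net(torch.randn(1, 3, 256, 256))
    print(_out.shape)  # expected: torch.Size([1, 3, 256, 256])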
| [
"[email protected]"
] | |
0e69e140cb4ed7bf46b6255f4fd971eca10eb002 | 6298a6530ab77e7ddf14891b590a96f12c23fde4 | /PC_TCPClientThread.py | 869daae80f15c0baad03188e8419cb3f5d0e15f5 | [] | no_license | Yadunund/RobotCommunication | c2a81ec397049a87ad5846f48ed651055d6c8972 | 8f3b8f6ab8adf9ff94c2660ef093f6fd84397b89 | refs/heads/master | 2020-06-18T23:21:27.773233 | 2019-07-12T01:59:46 | 2019-07-12T01:59:46 | 196,490,391 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 27 09:41:37 2019
@author: Yadunund Vijay
TCP/IP Client Software capable of receiving messages in a thread
"""
import numpy as np
import socket
import time
import threading
received_string=''
send_string=''
server_ip='127.0.0.1'
server_port=65432
connected=False
def client_connect():
global s, connected
while True:
try:
if (connected==False):
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((server_ip,server_port))
print('Connected to Server')
time.sleep(0.5)
                connected=True
            else:
                # already connected: sleep briefly instead of busy-spinning
                time.sleep(1)
except Exception as e:
print("Error"+str(e))
            connected = False
            time.sleep(1)  # brief backoff before the next connection attempt
def client_recv():
global s, received_string, connected
while True:
try:
if connected:
received_string=s.recv(1024).decode()
                print('Received:'+received_string)
            else:
                time.sleep(0.1)  # wait until the connect thread has (re)established the socket
except Exception as e:
print("Error"+str(e))
connected=False
s.close()
# creating threads
thread_connect=threading.Thread(target=client_connect)
#thread_recv=threading.Thread(target=client_recv)
thread_connect.start()
#thread_recv.start()
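# --- Added sketch (not in the original script): a matching send helper over
# the same socket; plain UTF-8 framing is an assumption.
def client_send(message):
    global s, send_string, connected
    if connected:
        send_string = message
        s.sendall(send_string.encode())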
| [
"[email protected]"
] | |
111f7b6d0b55a216661df79fcdd20ef9c022d60e | f7fbe5badfe5d08e6325cde32668a2f3ec01b865 | /project/__init__.py | 2ce296bce76942071cbaca0fe6864a116fd2051c | [] | no_license | adbeskine/fidelitybooks | 5cc5538201c8a9a6b3b2b379b202b942fc833377 | 5ac1390827935e79771b8031354c26fc4967ea28 | refs/heads/master | 2021-01-21T12:47:29.423679 | 2017-09-01T10:11:16 | 2017-09-01T10:11:16 | 102,100,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from flask import Flask, Blueprint
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_pyfile('_config.py')
db = SQLAlchemy(app)
from project.blueprint_base.base import base
from project.blueprint_books.books import books
from project.blueprint_purchase_engine.purchase_engine import purchase_engine
app.register_blueprint(base)
app.register_blueprint(books)
app.register_blueprint(purchase_engine)
##########
###TODO###
##########
# remove footer from contact page
| [
"[email protected]"
] | |
ecfd84c978eaaa929d3230dc81bd91a74382f0f7 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-roma/huaweicloudsdkroma/v2/model/create_live_data_api_v2_response.py | b0b0ffba285ed2f3dc279a9296f567580865dac3 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 19,605 | py | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateLiveDataApiV2Response(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'name': 'str',
'path': 'str',
'method': 'str',
'description': 'str',
'version': 'str',
'content_type': 'str',
'api_signature_id': 'str',
'roma_app_id': 'str',
'return_format': 'bool',
'parameters': 'list[LdApiParameter]',
'id': 'str',
'instance': 'str',
'type': 'str',
'status': 'int',
'created_time': 'datetime',
'modified_time': 'datetime',
'scripts': 'list[LdApiScript]',
'roma_app_name': 'str'
}
attribute_map = {
'name': 'name',
'path': 'path',
'method': 'method',
'description': 'description',
'version': 'version',
'content_type': 'content_type',
'api_signature_id': 'api_signature_id',
'roma_app_id': 'roma_app_id',
'return_format': 'return_format',
'parameters': 'parameters',
'id': 'id',
'instance': 'instance',
'type': 'type',
'status': 'status',
'created_time': 'created_time',
'modified_time': 'modified_time',
'scripts': 'scripts',
'roma_app_name': 'roma_app_name'
}
def __init__(self, name=None, path=None, method=None, description=None, version=None, content_type=None, api_signature_id=None, roma_app_id=None, return_format=None, parameters=None, id=None, instance=None, type=None, status=None, created_time=None, modified_time=None, scripts=None, roma_app_name=None):
"""CreateLiveDataApiV2Response
The model defined in huaweicloud sdk
        :param name: Backend API name. Supports Chinese characters, letters, digits, hyphens, underscores, dots, slashes, parentheses and colons (half- or full-width), and the Chinese enumeration comma; must start with a letter, Chinese character or digit.
        :type name: str
        :param path: Backend API request path. Supports letters, digits, hyphens, underscores, dots, etc., and must start with a slash (/).
        :type path: str
        :param method: Backend API request method. Supports GET, PUT, POST and DELETE.
        :type method: str
        :param description: Backend API description. The characters < and > are not allowed.
        :type description: str
        :param version: Backend API version. Supports letters, digits, underscores, hyphens and dots.
        :type version: str
        :param content_type: Backend API response content type.
        :type content_type: str
        :param api_signature_id: ID of the signature key bound to the backend API for signature authentication.
        :type api_signature_id: str
        :param roma_app_id: ID of the integration application the backend API belongs to.
        :type roma_app_id: str
        :param return_format: Whether the API response is formatted: true formats the response body; false leaves it unformatted.
        :type return_format: bool
        :param parameters: Request parameter list of the backend API.
        :type parameters: list[:class:`huaweicloudsdkroma.v2.LdApiParameter`]
        :param id: Backend API ID.
        :type id: str
        :param instance: ID of the instance the backend API belongs to.
        :type instance: str
        :param type: Backend API type: - data: data backend - function: function backend
        :type type: str
        :param status: Backend API status: - 1: to be developed - 3: under development - 4: deployed
        :type status: int
        :param created_time: Backend API creation time.
        :type created_time: datetime
        :param modified_time: Backend API modification time.
        :type modified_time: datetime
        :param scripts: Script information of the backend API.
        :type scripts: list[:class:`huaweicloudsdkroma.v2.LdApiScript`]
        :param roma_app_name: Name of the integration application the backend API belongs to.
        :type roma_app_name: str
"""
super(CreateLiveDataApiV2Response, self).__init__()
self._name = None
self._path = None
self._method = None
self._description = None
self._version = None
self._content_type = None
self._api_signature_id = None
self._roma_app_id = None
self._return_format = None
self._parameters = None
self._id = None
self._instance = None
self._type = None
self._status = None
self._created_time = None
self._modified_time = None
self._scripts = None
self._roma_app_name = None
self.discriminator = None
self.name = name
self.path = path
self.method = method
if description is not None:
self.description = description
self.version = version
self.content_type = content_type
if api_signature_id is not None:
self.api_signature_id = api_signature_id
self.roma_app_id = roma_app_id
if return_format is not None:
self.return_format = return_format
if parameters is not None:
self.parameters = parameters
if id is not None:
self.id = id
if instance is not None:
self.instance = instance
if type is not None:
self.type = type
if status is not None:
self.status = status
if created_time is not None:
self.created_time = created_time
if modified_time is not None:
self.modified_time = modified_time
if scripts is not None:
self.scripts = scripts
if roma_app_name is not None:
self.roma_app_name = roma_app_name
@property
def name(self):
"""Gets the name of this CreateLiveDataApiV2Response.
        Backend API name. Supports Chinese characters, letters, digits, hyphens, underscores, dots, slashes, parentheses and colons (half- or full-width), and the Chinese enumeration comma; must start with a letter, Chinese character or digit.
:return: The name of this CreateLiveDataApiV2Response.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CreateLiveDataApiV2Response.
        Backend API name. Supports Chinese characters, letters, digits, hyphens, underscores, dots, slashes, parentheses and colons (half- or full-width), and the Chinese enumeration comma; must start with a letter, Chinese character or digit.
:param name: The name of this CreateLiveDataApiV2Response.
:type name: str
"""
self._name = name
@property
def path(self):
"""Gets the path of this CreateLiveDataApiV2Response.
        Backend API request path. Supports letters, digits, hyphens, underscores, dots, etc., and must start with a slash (/).
:return: The path of this CreateLiveDataApiV2Response.
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this CreateLiveDataApiV2Response.
        Backend API request path. Supports letters, digits, hyphens, underscores, dots, etc., and must start with a slash (/).
:param path: The path of this CreateLiveDataApiV2Response.
:type path: str
"""
self._path = path
@property
def method(self):
"""Gets the method of this CreateLiveDataApiV2Response.
        Backend API request method. Supports GET, PUT, POST and DELETE.
:return: The method of this CreateLiveDataApiV2Response.
:rtype: str
"""
return self._method
@method.setter
def method(self, method):
"""Sets the method of this CreateLiveDataApiV2Response.
        Backend API request method. Supports GET, PUT, POST and DELETE.
:param method: The method of this CreateLiveDataApiV2Response.
:type method: str
"""
self._method = method
@property
def description(self):
"""Gets the description of this CreateLiveDataApiV2Response.
        Backend API description. The characters < and > are not allowed.
:return: The description of this CreateLiveDataApiV2Response.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this CreateLiveDataApiV2Response.
        Backend API description. The characters < and > are not allowed.
:param description: The description of this CreateLiveDataApiV2Response.
:type description: str
"""
self._description = description
@property
def version(self):
"""Gets the version of this CreateLiveDataApiV2Response.
        Backend API version. Supports letters, digits, underscores, hyphens and dots.
:return: The version of this CreateLiveDataApiV2Response.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this CreateLiveDataApiV2Response.
        Backend API version. Supports letters, digits, underscores, hyphens and dots.
:param version: The version of this CreateLiveDataApiV2Response.
:type version: str
"""
self._version = version
@property
def content_type(self):
"""Gets the content_type of this CreateLiveDataApiV2Response.
        Backend API response content type.
:return: The content_type of this CreateLiveDataApiV2Response.
:rtype: str
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""Sets the content_type of this CreateLiveDataApiV2Response.
        Backend API response content type.
:param content_type: The content_type of this CreateLiveDataApiV2Response.
:type content_type: str
"""
self._content_type = content_type
@property
def api_signature_id(self):
"""Gets the api_signature_id of this CreateLiveDataApiV2Response.
        ID of the signature key bound to the backend API for signature authentication.
:return: The api_signature_id of this CreateLiveDataApiV2Response.
:rtype: str
"""
return self._api_signature_id
@api_signature_id.setter
def api_signature_id(self, api_signature_id):
"""Sets the api_signature_id of this CreateLiveDataApiV2Response.
        ID of the signature key bound to the backend API for signature authentication.
:param api_signature_id: The api_signature_id of this CreateLiveDataApiV2Response.
:type api_signature_id: str
"""
self._api_signature_id = api_signature_id
@property
def roma_app_id(self):
"""Gets the roma_app_id of this CreateLiveDataApiV2Response.
        ID of the integration application the backend API belongs to.
:return: The roma_app_id of this CreateLiveDataApiV2Response.
:rtype: str
"""
return self._roma_app_id
@roma_app_id.setter
def roma_app_id(self, roma_app_id):
"""Sets the roma_app_id of this CreateLiveDataApiV2Response.
        ID of the integration application the backend API belongs to.
:param roma_app_id: The roma_app_id of this CreateLiveDataApiV2Response.
:type roma_app_id: str
"""
self._roma_app_id = roma_app_id
@property
def return_format(self):
"""Gets the return_format of this CreateLiveDataApiV2Response.
        Whether the API response is formatted: true formats the response body; false leaves it unformatted.
:return: The return_format of this CreateLiveDataApiV2Response.
:rtype: bool
"""
return self._return_format
@return_format.setter
def return_format(self, return_format):
"""Sets the return_format of this CreateLiveDataApiV2Response.
        Whether the API response is formatted: true formats the response body; false leaves it unformatted.
:param return_format: The return_format of this CreateLiveDataApiV2Response.
:type return_format: bool
"""
self._return_format = return_format
@property
def parameters(self):
"""Gets the parameters of this CreateLiveDataApiV2Response.
        Request parameter list of the backend API.
:return: The parameters of this CreateLiveDataApiV2Response.
:rtype: list[:class:`huaweicloudsdkroma.v2.LdApiParameter`]
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this CreateLiveDataApiV2Response.
        Request parameter list of the backend API.
:param parameters: The parameters of this CreateLiveDataApiV2Response.
:type parameters: list[:class:`huaweicloudsdkroma.v2.LdApiParameter`]
"""
self._parameters = parameters
@property
def id(self):
"""Gets the id of this CreateLiveDataApiV2Response.
        Backend API ID.
:return: The id of this CreateLiveDataApiV2Response.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CreateLiveDataApiV2Response.
        Backend API ID.
:param id: The id of this CreateLiveDataApiV2Response.
:type id: str
"""
self._id = id
@property
def instance(self):
"""Gets the instance of this CreateLiveDataApiV2Response.
        ID of the instance the backend API belongs to.
:return: The instance of this CreateLiveDataApiV2Response.
:rtype: str
"""
return self._instance
@instance.setter
def instance(self, instance):
"""Sets the instance of this CreateLiveDataApiV2Response.
        ID of the instance the backend API belongs to.
:param instance: The instance of this CreateLiveDataApiV2Response.
:type instance: str
"""
self._instance = instance
@property
def type(self):
"""Gets the type of this CreateLiveDataApiV2Response.
        Backend API type: - data: data backend - function: function backend
:return: The type of this CreateLiveDataApiV2Response.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this CreateLiveDataApiV2Response.
        Backend API type: - data: data backend - function: function backend
:param type: The type of this CreateLiveDataApiV2Response.
:type type: str
"""
self._type = type
@property
def status(self):
"""Gets the status of this CreateLiveDataApiV2Response.
        Backend API status: - 1: to be developed - 3: under development - 4: deployed
:return: The status of this CreateLiveDataApiV2Response.
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this CreateLiveDataApiV2Response.
        Backend API status: - 1: to be developed - 3: under development - 4: deployed
:param status: The status of this CreateLiveDataApiV2Response.
:type status: int
"""
self._status = status
@property
def created_time(self):
"""Gets the created_time of this CreateLiveDataApiV2Response.
        Backend API creation time.
:return: The created_time of this CreateLiveDataApiV2Response.
:rtype: datetime
"""
return self._created_time
@created_time.setter
def created_time(self, created_time):
"""Sets the created_time of this CreateLiveDataApiV2Response.
        Backend API creation time.
:param created_time: The created_time of this CreateLiveDataApiV2Response.
:type created_time: datetime
"""
self._created_time = created_time
@property
def modified_time(self):
"""Gets the modified_time of this CreateLiveDataApiV2Response.
        Backend API modification time.
:return: The modified_time of this CreateLiveDataApiV2Response.
:rtype: datetime
"""
return self._modified_time
@modified_time.setter
def modified_time(self, modified_time):
"""Sets the modified_time of this CreateLiveDataApiV2Response.
        Backend API modification time.
:param modified_time: The modified_time of this CreateLiveDataApiV2Response.
:type modified_time: datetime
"""
self._modified_time = modified_time
@property
def scripts(self):
"""Gets the scripts of this CreateLiveDataApiV2Response.
        Script information of the backend API.
:return: The scripts of this CreateLiveDataApiV2Response.
:rtype: list[:class:`huaweicloudsdkroma.v2.LdApiScript`]
"""
return self._scripts
@scripts.setter
def scripts(self, scripts):
"""Sets the scripts of this CreateLiveDataApiV2Response.
        Script information of the backend API.
:param scripts: The scripts of this CreateLiveDataApiV2Response.
:type scripts: list[:class:`huaweicloudsdkroma.v2.LdApiScript`]
"""
self._scripts = scripts
@property
def roma_app_name(self):
"""Gets the roma_app_name of this CreateLiveDataApiV2Response.
        Name of the integration application the backend API belongs to.
:return: The roma_app_name of this CreateLiveDataApiV2Response.
:rtype: str
"""
return self._roma_app_name
@roma_app_name.setter
def roma_app_name(self, roma_app_name):
"""Sets the roma_app_name of this CreateLiveDataApiV2Response.
        Name of the integration application the backend API belongs to.
:param roma_app_name: The roma_app_name of this CreateLiveDataApiV2Response.
:type roma_app_name: str
"""
self._roma_app_name = roma_app_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateLiveDataApiV2Response):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
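# --- Added illustration (not generated code): constructing and serializing the
# model directly; attribute values are placeholders.
if __name__ == '__main__':
    _resp = CreateLiveDataApiV2Response(name='demo_api', path='/demo', method='GET',
                                        version='1.0', content_type='json',
                                        roma_app_id='app-00000000')
    print(_resp.to_str())  # requires simplejson, as used by to_str() above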
| [
"[email protected]"
] | |
e125779e5b82afea571e3e761b4bc5562991e0f0 | 7211c66eea8cf97c3f5cc4308f3bb6f7cf84927a | /HackerRank/graph/snakes_and_ladders.py | fa6ce12d75a5096a52c5291c0cacd584af525b39 | [] | no_license | Quinnan-Gill/Algorithms | cc420010f2723117aff936cb43ca8a83a69f6a7b | 766061840577af2cf4c072305a2ce125c3472133 | refs/heads/master | 2020-04-05T00:57:09.614494 | 2019-07-18T04:20:01 | 2019-07-18T04:20:01 | 156,417,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | import sys
from queue import Queue
# Complete the quickestWayUp function below.
def quickestWayUp(ladders, snakes):
board = list(range(0, 101))
for edge in (ladders + snakes):
board[edge[0]] = edge[1]
visited = {}
queue = Queue()
queue.put((1, 0))
    visited[1] = True
    while not queue.empty():
        val, roll = queue.get()
        for adj in range(val + 1, min(val + 6, 100) + 1):
            dest = board[adj]  # follow any ladder/snake on the landing square
            if dest == 100:
                return roll + 1
            if dest not in visited:
                visited[dest] = True
                queue.put((dest, roll + 1))
    return -1  # square 100 is unreachable
if __name__ == "__main__":
# ladders = [[32, 62],
# [42, 68],
# [12, 98]]
#
# snakes = [[95, 13],
# [97, 25],
# [93, 37],
# [79, 27],
# [75, 19],
# [49, 47],
# [67, 17]]
#
# print(quickestWayUp(ladders, snakes))
ladders = [[8 ,52],
[6 ,80],
[26, 42],
[2 ,72]]
snakes = [[51, 19],
[39, 11],
[37, 29],
[81, 3],
[59, 5],
[79, 23],
[53, 7],
[43, 33],
[77, 21]]
print(quickestWayUp(ladders, snakes))
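# Expected result for the board above (a sketch, checked by hand): one
# shortest path is 1 -> 6->80 (ladder), 86, 92, 98, 100, i.e. 5 rolls.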
| [
"[email protected]"
] | |
7a7bec1ece07b1d1a242b3681770ec03f229960c | 72d7e53e53d0fd60a5dbc6ece5e6c3a19a1cddc8 | /Lib/hTools2/dialogs/font/create_spaces.py | 638d3d26fe5312df74b656d6aefaadd883fd82cc | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | miguelsousa/hTools2 | d814d92f79d5f5bdaab16415626dd1f47692cfa9 | eab400677c1b21bb2519a7354a142e167c2b39ba | refs/heads/master | 2021-01-15T18:36:09.522992 | 2013-09-27T13:48:05 | 2013-09-27T13:48:05 | 6,453,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,299 | py | # [h] a dialog to create space glyphs
# debug
import hTools2
reload(hTools2)
if hTools2.DEBUG:
import hTools2.modules.fontutils
reload(hTools2.modules.fontutils)
import hTools2.modules.encoding
reload(hTools2.modules.encoding)
# imports
from vanilla import *
try:
from mojo.roboFont import CurrentFont, CurrentGlyph
except:
from robofab.world import CurrentFont, CurrentGlyph
from hTools2.modules.fontutils import get_full_name
from hTools2.modules.encoding import unicode_hexstr_to_int
# objects
class createSpaceGlyphsDialog(object):
"""A dialog to create space glyphs in a font."""
_title = 'spaces'
_padding = 10
_padding_top = 10
_column_1 = 55
_field_width = 40
_row_height = 18
_button_height = 30
_box_height = 23
_height = (_row_height * 5) + (_button_height * 1) + (_padding * 8) + _box_height + 4
_width = 123
_hairspace_factor = .08
_thinspace_factor = .16
_thickspace_factor = .333
_figurespace_factor = .6
def __init__(self):
if CurrentFont() is not None:
self.font = CurrentFont()
self.w = FloatingWindow(
(self._width, self._height),
self._title,
closable=True)
# current font
x = self._padding
y = self._padding
self.w.box = Box(
(x, y,
-self._padding,
self._box_height))
self.w.box.text = TextBox(
(5, 0,
-self._padding,
self._row_height),
text=get_full_name(self.font),
sizeStyle='small')
# hair space
y += self._row_height + 18
self.w._hairspace_label = TextBox(
(x, y,
self._column_1,
self._row_height),
"hair",
sizeStyle='small')
x += self._column_1
self.w._hairspace_value = EditText(
(x, y,
-self._padding,
self._row_height),
text=int(self.font.info.unitsPerEm * self._hairspace_factor),
sizeStyle='small')
# thin space
x = self._padding
y += self._row_height + self._padding
self.w._thinspace_label = TextBox(
(x, y,
self._column_1,
self._row_height),
"thin",
sizeStyle='small')
x += self._column_1
self.w._thinspace_value = EditText(
(x, y,
-self._padding,
self._row_height),
text=int(self.font.info.unitsPerEm * self._thinspace_factor),
sizeStyle='small')
# thick space
x = self._padding
y += self._row_height + self._padding
self.w._thickspace_label = TextBox(
(x, y,
self._column_1,
self._row_height),
"thick",
sizeStyle='small')
x += self._column_1
self.w._thickspace_value = EditText(
(x, y,
-self._padding,
self._row_height),
text=int(self.font.info.unitsPerEm * self._thickspace_factor),
sizeStyle='small')
# figure space
x = self._padding
y += self._row_height + self._padding
self.w._figurespace_label = TextBox(
(x, y,
self._column_1,
self._row_height),
"figure",
sizeStyle='small')
x += self._column_1
self.w._figurespace_value = EditText(
(x, y,
-self._padding,
self._row_height),
text=int(self.font.info.unitsPerEm * self._figurespace_factor),
sizeStyle='small')
# zero width space
x = self._padding
y += self._row_height + self._padding
self.w._zerowidth_label = TextBox(
(x, y,
self._column_1,
self._row_height),
"0 width",
sizeStyle='small')
x += self._column_1
self.w._zerowidth_value = EditText(
(x, y,
-self._padding,
self._row_height),
text='0',
readOnly=True,
sizeStyle='small')
# buttons
x = self._padding
y += self._row_height + self._padding
self.w._button_apply = SquareButton(
(x, y,
-self._padding,
self._button_height),
"create",
sizeStyle='small',
callback = self.apply_callback)
# y += self._button_height + self._padding
# self.w._button_switch = SquareButton(
# (x, y,
# -self._padding,
# self._button_height),
# "update",
# sizeStyle='small',
# callback=self.update_font_callback)
# open window
self.w.open()
# no font open
else:
print 'please open a font first.\n'
def apply_callback(self, sender):
_hairspace = int(self.w._hairspace_value.get())
_thinspace = int(self.w._thinspace_value.get())
_thickspace = int(self.w._thickspace_value.get())
_figurespace = int(self.w._figurespace_value.get())
# boolstring = (False, True)
if self.font is not None:
# print info
print 'creating space glyphs...\n'
print '\thair space: %s units' % _hairspace
print '\tthin space: %s units' % _thinspace
print '\tthick space: %s units' % _thickspace
print '\tfigure space: %s units' % _figurespace
print '\tzero-width space: 0'
# hair space
self.font.newGlyph('hairspace')
self.font['hairspace'].width = _hairspace
self.font['hairspace'].unicode = unicode_hexstr_to_int('uni200A')
self.font['hairspace'].update()
# thin space
self.font.newGlyph('thinspace')
self.font['thinspace'].width = _thinspace
self.font['thinspace'].unicode = unicode_hexstr_to_int('uni2009')
self.font['thinspace'].update()
# thick space
self.font.newGlyph('thickspace')
self.font['thickspace'].width = _thickspace
self.font['thickspace'].unicode = unicode_hexstr_to_int('uni2004')
self.font['thickspace'].update()
# figure space
self.font.newGlyph('figurespace')
self.font['figurespace'].width = _figurespace
self.font['figurespace'].unicode = unicode_hexstr_to_int('uni2007')
self.font['figurespace'].update()
# zero-width space
self.font.newGlyph('zerowidthspace')
self.font['zerowidthspace'].width = 0
self.font['zerowidthspace'].unicode = unicode_hexstr_to_int('uni200B')
self.font['zerowidthspace'].update()
# done
self.font.update()
print
print '...done.\n'
else:
print 'No font selected, please close the dialog and try again.\n'
def update_font_callback(self, sender):
self.font = CurrentFont()
self.w.box.text.set(get_full_name(self.font))
def close_callback(self, sender):
self.w.close()
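# Usage sketch (added; not part of the original module). In RoboFont, with a
# font open, instantiating the dialog shows the floating window. The import
# path below is assumed from this file's location:
#   from hTools2.dialogs.font.create_spaces import createSpaceGlyphsDialog
#   createSpaceGlyphsDialog()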
| [
"[email protected]"
] | |
c7dbfe5b1beecc9575b1b80efcf3806a7f67fc0d | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_172/ch14_2020_03_02_13_49_56_090971.py | 5694082fc81bea21230118ca87fdde969faa2512 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | import math
#calcula_distancia_do_projetil
def calcula_distancia_do_projetil(v, y, o):
    # range of a projectile launched from height y at speed v and angle o:
    # d = (v^2 / 2g) * (1 + sqrt(1 + 2*g*y / (v^2 * sin^2(o)))) * sin(2*o)
    d = (v**2 / (2*9.8)) * (1 + (1 + (2*9.8*y) / (v**2 * math.sin(o)**2))**0.5) * math.sin(2*o)
return d | [
"[email protected]"
] | |
03bc5e94be60fffd853f3fb807cf40cd48b22a37 | eb874c1b263a1db2f7beb04b08d51a5b0f6ad8e8 | /dialogue_pl/data/__init__.py | 49d24ae5a545529689f4ecd4d288609922345155 | [] | no_license | freekang/DCL-1 | 2c89d5b07767864819709118f06f1e34465c5c5d | 63f30e282716a22a922c29e4f3b5e5f81696dbb0 | refs/heads/main | 2023-04-03T14:29:45.764790 | 2021-04-11T02:53:20 | 2021-04-11T02:53:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | from .dialogue import DIALOGUE | [
"[email protected]"
] | |
79a050f31a660c145abbe294845dfb565e0e78c2 | 81d523c432d2b6f20b40fd96a8be9a84b32da0c1 | /apm/manage.py | ca3e62e7767dcc19a517959a82a2b88884fcaf86 | [] | no_license | devmanorg/apm-server | 3550e472554a7bb66e6ed93a8dce10f37b32f663 | 4fb1affd0c1f775fc7ff051ce5b18223343fad55 | refs/heads/master | 2022-05-03T18:40:35.366061 | 2021-10-12T13:50:11 | 2021-10-12T13:50:11 | 215,140,988 | 1 | 0 | null | 2022-04-22T22:32:47 | 2019-10-14T20:40:06 | Python | UTF-8 | Python | false | false | 623 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apm.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
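# Usage sketch (added): standard Django entry point, e.g.
#   python manage.py migrate
#   python manage.py runserver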
| [
"[email protected]"
] | |
d541609cc7a9545847395399b4cb428f20e37fd4 | 603db646b1ec2a9c9642366cc0da552611560786 | /Implementation/document_matrix.py | 5a458b9c0de6957e0744cc5f6b9c953542022641 | [] | no_license | yazoo178/08_JUL_TP | 3f6a73795229d35f7ae7fc18246b73de888a4eb0 | 2c020a66bbf6378487cc71525b9235042803f38c | refs/heads/master | 2020-12-02T12:43:58.705785 | 2017-07-07T23:04:46 | 2017-07-07T23:04:46 | 96,582,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,003 | py | import re, math, concurrent.futures
WordRegex = "'?\w[\w\d']*(?:-\w+)*'?"
WordIndexFileRegex = "('?\w[\w\d']*(?:-\w+)*'?)(\t)({[0-9]+,[0-9]+})+"
TokenMatch = "{([0-9]+),([0-9]+)}"
#class used to represent our documents
#encapsulate the tokenized words
#the documents in which they occur
#and the count of each word
#this class also stores a vector
#representation of each document
class DocumentMatrix:
def __init__(self, _documentReader,_tfWeighter, _idfType):
#This is our inverted index
#words->id->count
self.dataDict = {}
#Used for reading document collection
self.documentReader = _documentReader
#number of documents in the collection
self.documentCount = 0
        #tf.idf vectors to store against each document
#this is optional and can be ignored by specifying the -F flag
self.vectorDocs = {}
#document ids
self.docIds = set()
#The most common term
self.maxTermFreq = 0
#abstract reference to a tf weighting
#changes depending on the command-line parameters for -tf
self.tfWeighter = _tfWeighter
#idf type
self.idfType = _idfType
#This method will load the collection data from the specified documents
#file. It then adds the data into dataDict
#ARG:stops:An optional stop list of words to ignore
#ARG:stem: the type of stemmer to use.
def loadIndexFromCollection(self, stops, stem, onTheFly):
for doc in self.documentReader:
self.docIds.add(doc.docid)
for line in doc.lines:
for word in re.finditer(WordRegex, line):
lowerWord = word.group().lower()
if stem:
lowerWord = stem(lowerWord)
if lowerWord not in stops:
if lowerWord not in self.dataDict:
self.dataDict[lowerWord] = {}
self.dataDict[lowerWord][doc.docid] = 1
else:
if doc.docid in self.dataDict[lowerWord]:
self.dataDict[lowerWord][doc.docid] += 1
else:
self.dataDict[lowerWord][doc.docid] = 1
self.computeDocumentCount()
#if the on the fly flag was specified
#then ignore this
if onTheFly != 1:
self.populateVectorIndex()
#Output the data in dataDict to a text file
#ARG:outputFileName:the name of the file to output
def outputFileIndex(self, outputFileName):
file = open(outputFileName, 'w')
for word in self.dataDict:
file.write(word + "\t")
for entry in self.dataDict[word]:
file.write("{" + str(entry) + ',' +
str(self.dataDict[word][entry]) + "}")
file.write('\n')
file.close()
    #Loads existing data from an index file into dataDict
#ARG:indexFile:the path of the index file
def loadIndexFromIndexFile(self, indexFile, onTheFly):
file = open(indexFile, 'r')
data = file.read()
for line in re.finditer(WordIndexFileRegex, data):
word = line.group(1)
self.dataDict[word] = {}
for tokenMatcher in re.finditer(TokenMatch, line.group()):
                doc_id = int(tokenMatcher.group(1))
                self.docIds.add(doc_id)
                # accumulate counts rather than resetting the entry on every match
                self.dataDict[word][doc_id] = self.dataDict[word].get(doc_id, 0) + int(tokenMatcher.group(2))
self.computeDocumentCount()
#if the on the fly flag was specified
#then ignore this
if onTheFly != 1:
self.populateVectorIndex()
    #works out the number of documents in the collection
#used for when we load an existing index file
def computeDocumentCount(self):
resultSet = set()
for keyWord in self.dataDict:
for keySet in self.dataDict[keyWord]:
resultSet.add(keySet)
if self.dataDict[keyWord][keySet] > self.maxTermFreq:
self.maxTermFreq = self.dataDict[keyWord][keySet]
self.documentCount = len(resultSet)
#returns total number of documents in collection
def totalDocumentsInCollection(self):
return self.documentCount
#returns how many documents contain a given word
def documentFreqOfWord(self, word):
if word in self.dataDict:
return len(self.dataDict[word])
else:
return 0
#returns the idf
def inverseDocumentFreq(self, word):
return math.log10(self.totalDocumentsInCollection() / self.documentFreqOfWord(word))
def probinverseDocumentFreq(self, word):
val= max(0, math.log((self.totalDocumentsInCollection()
- self.documentFreqOfWord(word))/self.documentFreqOfWord(word)))
return val
#populates tf.idf values for this dataset
def populateVectorIndex(self):
from document_vector import DocumentVector
from progress import printProgress
print(self.idfType)
for docId in self.docIds:
printProgress(docId, len(self.docIds), prefix = 'Computing tf.idf vectors:', suffix = 'Complete', barLength = 50)
vect = DocumentVector()
self.vectorDocs[docId] = vect
for word in self.dataDict:
if not self.tfWeighter.ignoreZeroes() or docId in self.dataDict[word]:
tfValue = self.tfWeighter.getTfWeightingForDocument(word, docId, self)
idfValue = self.inverseDocumentFreq(word) if self.idfType == "t" else self.probinverseDocumentFreq(word)
vect.addValue(word, tfValue * idfValue)
vect.finalize()
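    # Worked example (added; not in the original file): with 100 documents in
    # the collection and a word occurring in 10 of them, inverseDocumentFreq
    # returns log10(100/10) = 1.0, while probinverseDocumentFreq returns
    # max(0, ln((100-10)/10)) = ln(9) ~ 2.197.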
| [
"[email protected]"
] | |
c8613482012ee5bf3cb7cfc3dc94da44235db342 | 6f2d1ccce4e26507197ff7999d6f5de685cc7b52 | /lfs_bulk_prices/migrations/0002_auto_20151116_0957.py | 70d67a14a6c6116fbec2c2ea352e8f6279a31fbb | [] | no_license | diefenbach/lfs-bulk-prices | fa75ac290ad65cb1710a92d40c8985371e03a08d | 4436c2a4900252d2d383a204592a2b09cf4da44d | refs/heads/master | 2021-07-11T12:57:06.756062 | 2021-02-19T09:16:48 | 2021-02-19T09:18:18 | 46,726,824 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lfs_bulk_prices', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='bulkprice',
options={'ordering': ('amount',)},
),
migrations.RenameField(
model_name='bulkprice',
old_name='price',
new_name='price_total',
),
migrations.AddField(
model_name='bulkprice',
name='amount',
field=models.SmallIntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='bulkprice',
name='price_percentual',
field=models.DecimalField(default=0, verbose_name='Price', max_digits=10, decimal_places=2),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='bulkprice',
unique_together=set([('product', 'price_total')]),
),
]
| [
"[email protected]"
] | |
d7882fa251ce0c003d6de4b848fbff3c65cbdc40 | 01fdd206c8c825b30870bdd3f6e75f0aa113b849 | /test/record/parser/test_response_whois_dns_be_response_throttled_limit.py | 014c5d855b66aa98b92ba4b07b0d6a1eaf38451c | [
"MIT"
] | permissive | huyphan/pyyawhois | 0fbc5a7d64a53ae6e3393fdc1c7ff0d0ac5f22b5 | 77fb2f73a9c67989f1d41d98f37037406a69d136 | refs/heads/master | 2021-01-23T22:42:55.989651 | 2015-09-19T16:40:06 | 2015-09-19T16:40:06 | 23,335,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py |
# This file is autogenerated. Do not edit it manually.
# If you want to change the content of this file, edit
#
# spec/fixtures/responses/whois.dns.be/response_throttled_limit
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisDnsBeResponseThrottledLimit(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.dns.be/response_throttled_limit.txt"
host = "whois.dns.be"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_response_throttled(self):
eq_(self.record.response_throttled, True)
| [
"[email protected]"
] | |
0b86382d473f8d6bbaff9f76378219dd14a66205 | 67b757dcbdd1ea1fdcd88ea28e14ec79a0bfbf49 | /project/migrations/0001_initial.py | de9720655a9368332615ff025a3d41851e344c59 | [] | no_license | arashaga/portal | 12fa4211d25a8beafac3a1ec69bb1a51d277b1ba | bb6958e3650a2be2628249a4ef12500f25f51032 | refs/heads/master | 2021-03-12T22:45:57.775263 | 2014-08-13T19:25:45 | 2014-08-13T19:25:45 | 21,305,193 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,588 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Projects'
db.create_table(u'project_projects', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project_name', self.gf('django.db.models.fields.CharField')(max_length=120)),
('project_title', self.gf('django.db.models.fields.CharField')(max_length=120)),
('project_number', self.gf('django.db.models.fields.IntegerField')(null=True)),
('project_address', self.gf('django.db.models.fields.CharField')(max_length=120, null=True, blank=True)),
('project_city', self.gf('django.db.models.fields.CharField')(max_length=120, null=True, blank=True)),
('project_state', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['state.States'])),
('project_start', self.gf('django.db.models.fields.DateField')()),
('project_finish', self.gf('django.db.models.fields.DateField')()),
('project_owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['company.Companies'])),
('Project_creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('project_active', self.gf('django.db.models.fields.BooleanField')()),
))
db.send_create_signal(u'project', ['Projects'])
# Adding model 'ProjectGroup'
db.create_table(u'project_projectgroup', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project_group_description', self.gf('django.db.models.fields.CharField')(max_length=200)),
('project_group_abrv', self.gf('django.db.models.fields.CharField')(max_length=20)),
('project_group_timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'project', ['ProjectGroup'])
# Adding model 'ProjectContacts'
db.create_table(u'project_projectcontacts', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project_contact', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['signup.SignUp'])),
('project_contact_group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['project.ProjectGroup'])),
('project_contact_add_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'project', ['ProjectContacts'])
def backwards(self, orm):
# Deleting model 'Projects'
db.delete_table(u'project_projects')
# Deleting model 'ProjectGroup'
db.delete_table(u'project_projectgroup')
# Deleting model 'ProjectContacts'
db.delete_table(u'project_projectcontacts')
models = {
u'address.addresses': {
'Meta': {'object_name': 'Addresses'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['state.States']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'company.companies': {
'Meta': {'object_name': 'Companies'},
'company_abv': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'company_address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['address.Addresses']"}),
'company_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '120'}),
'date_modified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {})
},
u'project.projectcontacts': {
'Meta': {'object_name': 'ProjectContacts'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project_contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['signup.SignUp']"}),
'project_contact_add_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'project_contact_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['project.ProjectGroup']"})
},
u'project.projectgroup': {
'Meta': {'object_name': 'ProjectGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project_group_abrv': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'project_group_description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'project_group_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'project.projects': {
'Meta': {'object_name': 'Projects'},
'Project_creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project_active': ('django.db.models.fields.BooleanField', [], {}),
'project_address': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'project_city': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'project_finish': ('django.db.models.fields.DateField', [], {}),
'project_name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'project_number': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['company.Companies']"}),
'project_start': ('django.db.models.fields.DateField', [], {}),
'project_state': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['state.States']"}),
'project_title': ('django.db.models.fields.CharField', [], {'max_length': '120'})
},
u'signup.signup': {
'Meta': {'object_name': 'SignUp'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['address.Addresses']", 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'state.states': {
'Meta': {'object_name': 'States'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state_abv': ('django.db.models.fields.CharField', [], {'max_length': '2'})
}
}
complete_apps = ['project'] | [
"[email protected]"
] | |
b6ee9ff2a44b284a10b7d1885a7e8fb5f83db66f | 9f37cb9c18d04aa85f9a9129bab379c5c02f721c | /MRINet.py | 5381fefc56c51482ab7e2265b026a61d23fca7d2 | [] | no_license | KarthikeyanG44/MRI-3D-CNN | 560437d5a74a79918973a7eaf5e142f07ffed893 | 1de174be243bcd7a1d572e27c79adbc8bd29e39d | refs/heads/master | 2022-04-23T09:10:16.659081 | 2020-04-24T13:04:17 | 2020-04-24T13:04:17 | 255,686,732 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,739 | py | #### Import Dependencies ###
import torch.nn as nn
import torch.nn.functional as F
####### Define the network #####
class MRINet(nn.Module):
    def __init__(self):
super(MRINet, self).__init__()
self.Conv_1 = nn.Conv3d(1, 8, 3,stride=1)
self.Conv_1_bn = nn.BatchNorm3d(8)
self.Conv_2 = nn.Conv3d(8, 16, 3,stride=1)
self.Conv_2_bn = nn.BatchNorm3d(16)
self.Conv_3 = nn.Conv3d(16, 32, 3,stride=1)
self.Conv_3_bn = nn.BatchNorm3d(32)
self.Conv_4 = nn.Conv3d(32, 64, 3,stride=1)
self.Conv_4_bn = nn.BatchNorm3d(64)
# self.dropout = nn.Dropout3d(p = 0.6)
self.dense_1 = nn.Linear(64*1*2*2,64)
self.dense_2 = nn.Linear(64, 32)
self.dense_3 = nn.Linear(32,16)
self.dense_4 = nn.Linear(16,8)
self.dense_5 = nn.Linear(8,2)
self.relu = nn.ReLU()
def forward(self,x):
x = self.relu(self.Conv_1_bn(self.Conv_1(x)))
x = F.max_pool3d(x, 3)
# x = self.dropout(x)
# print("After convolution 1",x.size())
x = self.relu(self.Conv_2_bn(self.Conv_2(x)))
x = F.max_pool3d(x, 3)
# x = self.dropout(x)
# print("After convolution 2",x.size())
x = self.relu(self.Conv_3_bn(self.Conv_3(x)))
x = F.max_pool3d(x, 2)
# print("After convolution 3",x.size())
x = self.relu(self.Conv_4_bn(self.Conv_4(x)))
x = F.max_pool3d(x,2)
# print("After convolution 4",x.size())
x = x.view(-1,64*1*2*2)
x = self.relu(self.dense_1(x))
x = self.relu(self.dense_2(x))
x = self.relu(self.dense_3(x))
x = self.relu(self.dense_4(x))
x = self.dense_5(x)
return F.log_softmax(x, dim=1)
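# Smoke test (added; not in the original repo). The input size is an assumption:
# it must be one that the four conv/pool stages reduce to (1, 2, 2), which the
# x.view(-1, 64*1*2*2) flatten above requires.
if __name__ == "__main__":
    import torch
    net = MRINet()
    scan = torch.randn(1, 1, 98, 134, 134)  # hypothetical (N, C, D, H, W) volume
    print(net(scan).shape)  # expected: torch.Size([1, 2]) log-probabilities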
| [
"[email protected]"
] | |
ceafb48a0c91b672dd1c099335f8dadfba4a977b | 2d53d2d4ae159a7ceec0f1299ac3f9cdf7ecb133 | /docs/conf.py | 19963fb93adce54e1c3123c2b9db22ea2db72a34 | [
"MIT"
] | permissive | yngtodd/orbiter | 89f8707d6f6a8f7d3b7b8ebaba80de9b67a1b7a8 | 7dac1e9c79dc1717d700639e51a7ab4b5d44593e | refs/heads/master | 2020-04-09T04:16:36.593951 | 2018-12-03T06:15:43 | 2018-12-03T06:15:43 | 160,016,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,561 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# Make sure orbiter is accessible without going through setup.py
dirname = os.path.dirname
sys.path.insert(0, dirname(dirname(__file__)))
# Mock mpi4py to get around having to install it on RTD server (which fails)
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['mpi4py']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# Finish imports
import orbiter
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.imgmath',
'sphinx.ext.viewcode',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon']
#'sphinx.ext.mathjax', ??
# imgmath settings
imgmath_image_format = 'svg'
imgmath_font_size = 14
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Orbiter'
copyright = u'2018, Todd Young'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = orbiter.__version__
# The full version, including alpha/beta/rc tags.
release = orbiter.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default' #'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_logo = "_static/img/orbiter.png"
html_theme_options = {
'logo_only': True
}
html_favicon = "_static/img/favicon.ico"
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'orbiterdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'orbiter.tex', u'Orbiter Documentation',
u'Todd Young', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'orbiter', u'Orbiter Documentation',
[u'Todd Young'], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'orbiter', u'Orbiter Documentation',
u'Todd Young', 'orbiter', 'One line description of project.',
'Miscellaneous'),
]
def setup(app):
app.add_stylesheet('css/modify.css')
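# Build sketch (added): with this conf.py the HTML docs are typically built
# from the docs/ directory via:  sphinx-build -b html . _build/html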
| [
"[email protected]"
] | |
4ff3e5cad72556fe9f962ad7f103b00c8c5e6ba2 | 53e4afae28fb51bf3227c9b4b1288f5ae902db43 | /reddit/structures.py | 555d3f55305d0b600f2ba325c6ad1ef88a61011c | [
"BSD-3-Clause"
] | permissive | geosoco/reddit_api | 64994522f301e50e44d7e9e3ffddb97769ccd58f | 02a1b6564e444316617b1daf55c4c63ca64875f0 | refs/heads/master | 2021-01-25T13:58:19.822716 | 2018-03-03T02:12:56 | 2018-03-03T02:12:56 | 123,632,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,949 | py | #!/usr/bin/env python
#
"""
Reddit objects
"""
import requests
import logging
log = logging.getLogger(__file__)
"""
==========================================================================
Thing
==========================================================================
"""
class Thing(object):
"""
Wrapper for a thing object
"""
def __init__(self, connection, endpoint, delay_request=False, **kwargs):
self.params = kwargs.pop("params", {})
self.response = None
self.error = None
self.connection = connection
self.endpoint = endpoint
if not delay_request:
self.make_request()
def make_request(self):
#print "thing request", self.endpoint
response = self.connection.get(
self.endpoint,
params=self.params
)
#print response.status_code
if response.status_code == requests.codes.ok:
self.response = response.json()
self.error = None
#print "response: ", self.response
else:
self.response = None
log.error("Request error ({}, {}, {})".format(
self.endpoint, response.status_code, response.text
))
self.error = {
"status": response.status_code,
"text": response.text
}
"""
==========================================================================
Listing
==========================================================================
"""
class Listing(object):
"""
Wrapper for a listing object
"""
def __init__(self, reddit, endpoint, limit=100, params=None):
default_params = {
"after": "",
"before": "",
"limit": limit
}
if params is not None:
self.params = dict(default_params, **params)
else:
self.params = default_params
self.connection = reddit
self.endpoint = endpoint
self.response = None
self.resp_index = 0
def make_request(self):
print "listing request", self.endpoint
response = self.connection.get(
self.endpoint,
params=self.params)
self.resp_index = 0
if response.status_code == requests.codes.ok:
self.response = response.json()
else:
print "listing error response", response.status_code
self.response = None
def next_page(self):
""" """
self.params["after"] = ""
if self.response is not None:
data = self.response.get("data", None)
if data is not None:
# if we already have a response, and we have an after
# field, use that as a parameter in the request
after = data.get("after", None)
if after is None:
return False
self.params["after"] = after
self.params["before"] = ""
self.make_request()
return (self.response is not None)
def __iter__(self):
""" implements iterator protocol. """
self.response = None
self.resp_index = 0
return self
def next(self):
""" returns next object. """
# if we don't have a response, grab one
if self.response is None:
if not self.next_page():
raise StopIteration()
data = self.response.get("data", {})
children = data.get("children", {})
# do we need to move to the next page?
if self.resp_index >= len(children):
if not self.next_page():
raise StopIteration
# make sure we're still within range
if self.resp_index < len(children):
list_item = children[self.resp_index]
self.resp_index += 1
return list_item
raise StopIteration()
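# Usage sketch (added; `conn` is any object exposing a requests-style
# .get(endpoint, params=...) method; the endpoint below is hypothetical):
#   listing = Listing(conn, "/r/python/new.json", limit=100)
#   for child in listing:
#       print child["kind"], child["data"].get("id")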
| [
"[email protected]"
] | |
5355bfcabb3956189c99cc3cea4fa9121f492993 | fc947740e3d546620c9bac01194de1078e2b485b | /xDeepFake-Test/deepFake_test_video_decision.py | 51a2b866069b3ce608a15a52881d28c7af6a4fd0 | [
"MIT"
] | permissive | umitkacar/Kaggle-DeepFakes | 02ff9a599f7e35dbaa2a8b2831841d31f2224b3d | 3e84a3eff4697e527f2324b9716adfb208a7be62 | refs/heads/master | 2022-03-30T16:48:05.049612 | 2020-04-04T12:37:32 | 2020-04-04T12:37:32 | 252,771,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,109 | py |
import sys
from datetime import datetime
import time
from mtcnn import MTCNN
import tensorflow as tf
import numpy as np
import cv2
from model.dtn import DTN
DEEPFAKE_MODEL_PATH = "/home/umit/xDeepFake/log/model/ckpt-204"
def leaf_l1_score(xlist, masklist, ch=None):
loss_list = []
xshape = xlist[0].shape
scores = []
for x, mask in zip(xlist, masklist):
if ch is not None:
score = tf.reduce_mean(tf.reshape(tf.abs(x[:, :, :, ch]), [xshape[0], -1]), axis=1)
else:
score = tf.reduce_mean(tf.reshape(tf.abs(x), [xshape[0], -1]), axis=1)
spoof_score = score * mask[:, 0]
scores.append(spoof_score)
loss = np.sum(np.stack(scores, axis=1), axis=1)
return loss
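# Shape sketch (added): each x in xlist is assumed to be (batch, H, W, C); the
# score is the mean absolute activation per sample, gated by the leaf routing
# mask, and the per-leaf scores are summed into one spoofness value per sample.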
def _from_np_to_tf_func(image,label):
return image.astype(np.float32), label.astype(np.float32)
if __name__ == "__main__":
detector = MTCNN()
dtn = DTN(32)
dtn_op = tf.compat.v1.train.AdamOptimizer(0.0005, beta1=0.5)
checkpoint = tf.train.Checkpoint(dtn=dtn,
dtn_optimizer=dtn_op)
checkpoint.restore(DEEPFAKE_MODEL_PATH)
#cap = cv2.VideoCapture("/media/umit/wd4tb/xDeepFake/deepfake-detection-challenge/train_full_videos/dfdc_train_part_17/hugcokpuks.mp4")
cap = cv2.VideoCapture("/home/umit/xDataset/deepfake-detection-challenge/test_videos/bwdmzwhdnw.mp4")
# OpenCV image config
font = cv2.FONT_HERSHEY_SIMPLEX
upLeftCornerOfText = (50,50)
fontScale = 1
lineType = 3
fontColor = (0,255,0)
while(cap.isOpened()):
ret, frame = cap.read()
#frame = cv2.resize(frame, (1280,720))
#frame = cv2.flip(cv2.transpose(frame), flipCode=1)
if ret==True:
xFace = detector.detect_faces(frame)
if xFace:
xBox = xFace[0]['box']
xBox = list(map(abs, xBox))
x = xBox[0];
y = xBox[1];
w = xBox[2];
h = xBox[3];
            crop = frame[y:y+h, x:x+w]  # rows span the height (h), columns the width (w)
#crop = frame[xBox[1]:xBox[1]+xBox[3],xBox[0]:xBox[0]+xBox[2]]
#cv2.imshow('crop',crop)
cv2.rectangle(frame,(xBox[0], xBox[1]),(xBox[0]+xBox[2],xBox[1]+xBox[3]),(0,255,0),2);
crop_rgb = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
crop_rgb = cv2.resize(crop_rgb, (256,256))
crop_hsv = cv2.cvtColor(crop_rgb, cv2.COLOR_RGB2HSV)
crop_rgb = crop_rgb / 255
crop_hsv = crop_hsv / 255
image = np.concatenate([crop_rgb, crop_hsv], axis=2)
extended_img = np.expand_dims(image, axis=0)
extended_label = np.ones(shape=(1,1))
image_ts, label_ts = tf.numpy_function(_from_np_to_tf_func, [extended_img, extended_label], [tf.float32, tf.float32])
with tf.GradientTape() as tape:
dmap_pred, cls_pred, route_value, leaf_node_mask = dtn(image_ts, label_ts, False)
# Fusion score
dmap_score = leaf_l1_score(dmap_pred, leaf_node_mask)
cls_score = leaf_l1_score(cls_pred,leaf_node_mask)
print("dmap_score = " + str("%.3f\n" % dmap_score))
print("cls_score = " + str("%.3f\n" % cls_score))
if dmap_score <= 0.1 and cls_score <= 0.2:
last_score = 0.2
elif dmap_score > 0.1 and dmap_score <= 0.2 and cls_score <= 0.3:
last_score = 0.3
elif dmap_score > 0.2 and dmap_score <= 0.3 and cls_score <= 0.4:
last_score = 0.4
elif dmap_score > 0.3 and dmap_score <= 0.4 and cls_score >= 0.6:
last_score = 0.6
elif dmap_score > 0.4 and dmap_score <= 0.45 and cls_score >= 0.8:
last_score = 0.75
elif dmap_score > 0.45 and cls_score >= 0.9:
last_score = 0.85
else:
last_score = 0.5
if(last_score < 0.5):
result = "Real"
fontColor = (0,255,0)
elif(last_score == 0.5):
result = "Unknown"
fontColor = (0,255,255)
else:
result = "Fake"
fontColor = (0,0,255)
print(result + " " + str("%.3f\n" % last_score))
cv2.rectangle(frame,(x,y),(x+w,y+h),fontColor,lineType);
cv2.putText(frame,result,(x,y-10),font,fontScale,fontColor,lineType)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
7cdcb92bb8715988a8d3f207dcad0d300ffd2361 | 58726ad7b35626ce83e0d38d1866af8952463cc9 | /letters.py | 7b7946298583ff1f8fcab61dc0fb99758e10a8e2 | [] | no_license | misterye/python | afe85fecde0474b0849fd69ac98e23f799e2b197 | 70fef8727abaf6426e2e507ec494b8141a8f7699 | refs/heads/master | 2021-05-31T06:01:18.914592 | 2016-03-01T08:44:22 | 2016-03-01T08:44:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | from swampy.TurtleWorld import *
import math
world = TurtleWorld()
bob = Turtle()
bob.delay = 0.01
def polyline(t, n, length, angle):
for i in range(n):
fd(t, length)
rt(t, angle)
def arc(t, r, angle):
arc_length = 2*math.pi*r*angle/360.0
n = int(arc_length/3)+1
step_length = float(arc_length/n)
step_angle = float(angle)/n
polyline(t, n, step_length, step_angle)
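# Note (added): arc() approximates a circular arc with straight segments of
# roughly 3 units; e.g. r=70, angle=90 gives arc_length ~ 110 and n = 37 steps.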
def draw_a(t, length, angle):
rt(t, float(angle)/2+90)
fd(t, length)
pu(t)
rt(t, 180)
fd(t, length)
rt(t,180.0-float(angle))
pd(t)
fd(t, length)
pu(t)
lt(t, 180.0)
fd(t, length)
lt(t, 180.0-float(angle))
fd(t, 2*length/3)
lt(t, 90+float(angle)/2)
pd(t)
fd(t, 2*(2*length/3*math.cos(math.pi/2-2*math.pi*angle/2/360.0)))
pu(t)
fd(t, 200)
draw_a(bob, 70, 50)
def draw_b(t, length):
pd(t)
rt(t)
fd(t, float(length))
lt(t)
fd(t, float(length*0.3))
lt(t)
pu(t)
fd(t, float(length/2))
lt(t)
pd(t)
fd(t, float(length*0.3))
pu(t)
rt(t)
fd(t, float(length/2))
rt(t)
pd(t)
fd(t, float(length/4))
for i in range(2):
arc(t, float(length)/4, 184.0)
# pu(t)
# lt(t)
# fd(t, 1)
# lt(t)
# pd(t)
lt(t, 180)
fd(t, length*0.04)
pu(t)
lt(t, 180)
fd(t, 200)
draw_b(bob, 170)
wait_for_user()
| [
"[email protected]"
] | |
9f8af9b9017ff8043f2e5e6554d7330ea3de199d | 073294166acbe3eba7af0c4e0d84ce5fc3b08576 | /segmentation/transforms.py | 6543b0dc0224ef2edf1c08c55219e911e9cbe904 | [
"MIT"
] | permissive | sofroniewn/pytorch-segmentation | 7f0c72e12478fb7434f7a91544595961fb0be4bb | 0d2c6f874ed10973b860fe99b557baa691d584b7 | refs/heads/master | 2021-08-28T08:34:11.306328 | 2017-12-11T18:49:07 | 2017-12-11T18:49:07 | 111,135,869 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,043 | py | '''
Some transforms taken from
https://github.com/ZijunDeng/pytorch-semantic-segmentation
'''
from numpy import array, int32, random, asarray
from numpy import linspace, meshgrid, dstack, vstack, sin
from numpy.random import normal
from skimage.transform import estimate_transform, warp
from PIL import Image, ImageOps
import torch
import numbers
class MaskToTensor(object):
def __call__(self, img):
imgarray = array(img, dtype=int32)
return torch.from_numpy(imgarray/imgarray.max()).long()
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, mask):
assert img.size == mask.size
for t in self.transforms:
img, mask = t(img, mask)
return img, mask
class FreeScale(object):
def __init__(self, size):
self.size = tuple(reversed(size)) # size: (h, w)
def __call__(self, img, mask):
assert img.size == mask.size
return img.resize(self.size, Image.BILINEAR), mask.resize(self.size, Image.NEAREST)
class Scale(object):
def __init__(self, size):
self.size = size
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
if (w <= h and w == self.size) or (h <= w and h == self.size):
return img, mask
if w < h:
ow = self.size
oh = int(self.size * h / w)
return img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST)
else:
oh = self.size
ow = int(self.size * w / h)
return img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST)
class RandomHorizontallyFlip(object):
def __call__(self, img, mask):
r = torch.rand(1).numpy()
if r < 0.5:
return img.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(Image.FLIP_LEFT_RIGHT)
return img, mask
class RandomVerticallyFlip(object):
def __call__(self, img, mask):
r = torch.rand(1).numpy()
if r < 0.5:
return img.transpose(Image.FLIP_TOP_BOTTOM), mask.transpose(Image.FLIP_TOP_BOTTOM)
return img, mask
class CenterCrop(object):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img, mask):
assert img.size == mask.size
w, h = img.size
th, tw = self.size
x1 = int(round((w - tw) / 2.))
y1 = int(round((h - th) / 2.))
return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th))
class RandomRotate(object):
def __init__(self, degree):
self.degree = degree
def __call__(self, img, mask):
rotate_degree = torch.rand(1).numpy() * 2 * self.degree - self.degree
return img.rotate(rotate_degree, Image.BILINEAR), mask.rotate(rotate_degree, Image.NEAREST)
class RandomCrop(object):
def __init__(self, size, padding=0):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
def __call__(self, img, mask):
if self.padding > 0:
img = ImageOps.expand(img, border=self.padding, fill=0)
mask = ImageOps.expand(mask, border=self.padding, fill=0)
assert img.size == mask.size
w, h = img.size
th, tw = self.size
if w == tw and h == th:
return img, mask
if w < tw or h < th:
return img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST)
r = torch.rand(1).numpy()
x1 = int(r*(w - tw))
r = torch.rand(1).numpy()
y1 = int(r*(h - th))
return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th))
class RandomWarp(object):
def __init__(self, controlpoints, scale):
if isinstance(controlpoints, numbers.Number):
self.controlpoints = (int(controlpoints), int(controlpoints))
else:
self.controlpoints = controlpoints
self.scale = scale
def __call__(self, img, mask):
cols = img.size[1]
rows = img.size[0]
src_cols = linspace(0, cols, self.controlpoints[1])
src_rows = linspace(0, rows, self.controlpoints[0])
src_rows, src_cols = meshgrid(src_rows, src_cols)
src = dstack([src_cols.flat, src_rows.flat])[0]
dst_rows = src[:, 1] + self.scale*torch.randn(src[:, 1].shape).numpy()
dst_cols = src[:, 0] + self.scale*torch.randn(src[:, 1].shape).numpy()
dst = vstack([dst_cols, dst_rows]).T
tform = estimate_transform('piecewise-affine', src, dst)
warped_img = warp(asarray(img), tform, output_shape=img.size)
warped_mask = warp(asarray(mask), tform, output_shape=mask.size)
return Image.fromarray((255*warped_img).astype('uint8')), Image.fromarray((255*warped_mask).astype('uint8'))
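# Usage sketch (added; not in the original file): unlike torchvision, these
# transforms operate on (image, mask) pairs of equal-size PIL images:
#   aug = Compose([RandomHorizontallyFlip(), RandomRotate(10), RandomCrop(256)])
#   img, mask = aug(img, mask)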
| [
"[email protected]"
] | |
7ed9978c014fddeb187157e735facdae45341a67 | 7b4f39225305df655129deda553462f2c770b95f | /resources/SyncDb/sync_rest.py | a10475e808151805ce21289e7c9960367495da71 | [] | no_license | npospelov/docker-osm-server | bae95a8297156ee689e589bcc172b6a6acfc4393 | a2d180f36980eede4079a7bc6a7b6b6c403824c7 | refs/heads/main | 2023-02-21T11:03:02.282131 | 2021-01-21T11:39:59 | 2021-01-21T11:39:59 | 331,608,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,608 | py | import requests
import configparser
import lxml
import lxml.etree
import os
import subprocess
import logging
class CmdCsList:
def __init__(self):
self.config = configparser.ConfigParser()
self.osm_host = None
try:
self.config.read('sync_db.ini')
#print ([x for x in self.config['DEFAULT'].keys()])
self.osm_host = self.config['DEFAULT'].get('osm_host')
except:
logging.error("cannot get 'osm_host' from config")
def exec(self):
"""
ะพะฟัะตะดะตะปะธัั ะฒัะต ะผะฝะพะถะตััะฒะพ changeset-ะพะฒ ะธ ะฒัะฑัะฐัั ะฟะพัะปะตะดะฝะธะน
:return:
"""
cs_list = []
if self.osm_host is None:
return None
url = "http://{0}:3000/api/0.6/changesets".format(self.osm_host)
resp = requests.get(url)
cs_list = []
if resp.status_code == 200:
tree = None
try:
tree = lxml.etree.fromstring(bytes(resp.text,encoding='utf-8'))
except Exception as e:
logging.error("Error: cannot parse changeset list due to %s!" % str(e))
#print (resp.text)
return None
cs = tree.xpath("/osm/changeset[@id]")
for it in cs:
cs_id = int(it.get('id'))
cs_list.append(cs_id)
cs_list.sort()
return cs_list
class CmdGetChangeset:
def __init__(self):
self.config = configparser.ConfigParser()
self.osm_host = None
self.osc_dir = None
try:
self.config.read('sync_db.ini')
#print ([x for x in self.config['DEFAULT'].keys()])
self.osm_host = self.config['DEFAULT'].get('osm_host')
self.osc_dir = self.config['OVERPASS'].get('osc_dir')
except:
logging.error("cannot get 'osm_host','osc_dir' from config")
if not os.path.exists(self.osc_dir):
os.makedirs(self.osc_dir)
#self.change_xml_dict = dict()
def exec(self,cs_num):
"""
ะพะฟัะตะดะตะปะธัั XML c ะทะฐะดะฐะฝะฝัะผ ะฒ cs_num changeset-ะพะผ
:return:
"""
if self.osm_host is None:
logging.error("Error: cannot detect osm_host!")
return None
url = "http://{0}:3000/api/0.6/changeset/{1}/download".format(self.osm_host, cs_num)
resp = requests.get(url)
if resp.status_code == 200:
fname ='{0}/{1}.osc'.format(self.osc_dir,cs_num)
#print ("fname=%s" % fname)
try:
ff = open(fname,"wb")
ff.write(bytes(resp.text,encoding='utf-8'))
ff.close()
except Exception as e:
logging.error("Error: cannot open file %s!" % fname)
#print (resp.text)
return None
return resp.text
else:
logging.error("Error: request %s for url=\'%s\'" % (resp.status_code,url))
return None
# def mergeXml(self):
# res_tree = None
# for xkey in self.change_xml_dict.keys():
# try:
# ctree = lxml.etree.fromstring(bytes(self.change_xml_dict.get(xkey), encoding='utf-8'))
# if res_tree is None:
# res_tree = ctree
# continue
# except Exception as e:
# print("Error: cannot parse changeset list due to %s!" % str(e))
# #print (resp.text)
# return None
#
# ins_position = res_tree
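# Usage sketch (added; assumes a sync_db.ini providing [DEFAULT] osm_host and
# [OVERPASS] osc_dir):
#   cs_ids = CmdCsList().exec()              # ascending changeset ids, or None
#   if cs_ids:
#       CmdGetChangeset().exec(cs_ids[-1])   # save the newest changeset as .osc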
| [
"[email protected]"
] | |
77c86704dd758d9cd196693c271faf8f586f753b | ee9daa15a58655f460fd2dec82bc6afa3f6af54f | /class_0605/class_1.py | 41baf4c608d0c8918caced5d83a765c04f07c7b5 | [] | no_license | januarytw/Python_auto | a2aebf4240ed14ceddafaaec382dca35dcd8f309 | 851562a67936f6f71f1d5dcb25075af84300524c | refs/heads/master | 2020-03-19T03:04:27.333049 | 2018-08-13T10:03:35 | 2018-08-13T10:03:35 | 135,692,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | #ๅๅงๅๅฝๆฐ ๏ผๅบไบงๅฐฑ่ชๅธฆ็๏ผ def __init__(self)
# ็น็น๏ผ
# 1 ๅๆฎ้ๅฝๆฐไธๆ ท๏ผๆๅ
ณ้ฎๅญself
# 2 ไปๅฏไปฅๅไฝๅถๅๆฐใ้ป่ฎคๅๆฐใๅจๆๅๆฐ
# 3 ไปๆฒกๆ่ฟๅๅผ
# 4 ไฝ็จ๏ผๆฏไธไธชๅฎไพๅๅปบ็ๆถๅ ๏ผ้ฝไผ่ชๅจ็ๅธฆไธinitๅฝๆฐ้้ข็ๅๆฐ
# 5 ไฝ ่ช่ฎคไธบๆฏ่ฟไธช็ฑปๅฟ
้กป่ฆๅ
ทๅค็ๅฑๆง๏ผ่ฏทๆพๅฐinitๅฝๆฐ้้ข
class user():
    def __init__(self, name, content):
        self.name = name
        self.content = content
    def describe_user(self):
        print("This user's name is %s" % self.name)
    def greet_user(self):
        print(self.content, self.name)
if __name__ == "__main__":  # Python entry point: runs only when this module is executed directly
    u = user("zhang", "Good morning")
    u.describe_user()
u.descirber_user()
u.greet_user() | [
"[email protected]"
] | |
06c99fc729f976a78ae3c07d3fcbb490818a308a | 51e07e7d64d9e281728aa49d216c9f7df3af48f8 | /face_detection.py | 090978b39d40d77440f43c2a4e05470f7970f756 | [] | no_license | pawantilara/OpenCv-program | 2e10b8b4075b384c854f4c041f209681d4cce8ef | 286f73db88674cc13af05f815f16a820e0174007 | refs/heads/master | 2021-10-22T17:48:30.092883 | 2019-03-12T08:50:50 | 2019-03-12T08:50:50 | 83,038,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,903 | py |
# OpenCV program to detect face in real time
# import libraries of python OpenCV
# where its functionality resides
import cv2
# load the required trained XML classifiers
# https://github.com/Itseez/opencv/blob/master/
# data/haarcascades/haarcascade_frontalface_default.xml
# Trained XML classifiers describes some features of some
# object we want to detect a cascade function is trained
# from a lot of positive(faces) and negative(non-faces)
# images.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# https://github.com/Itseez/opencv/blob/master
# /data/haarcascades/haarcascade_eye.xml
# Trained XML file for detecting eyes
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# capture frames from a camera
cap = cv2.VideoCapture(1)
# loop runs if capturing has been initialized.
while 1:
# reads frames from a camera
ret, img = cap.read()
# convert to gray scale of each frames
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detects faces of different sizes in the input image
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
# To draw a rectangle in a face
cv2.rectangle(img,(x,y),(x+w,y+h),(255,255,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
# Detects eyes of different sizes in the input image
eyes = eye_cascade.detectMultiScale(roi_gray)
#To draw a rectangle in eyes
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,127,255),2)
# Display an image in a window
cv2.imshow('img',img)
# Wait for Esc key to stop
k = cv2.waitKey(30) & 0xff
if k == 27:
break
# Close the window
cap.release()
# De-allocate any associated memory usage
cv2.destroyAllWindows()
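# Note (added): detectMultiScale(gray, 1.3, 5) uses scaleFactor=1.3 and
# minNeighbors=5; a larger minNeighbors gives fewer false positives but can
# miss small or partially occluded faces.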
| [
"[email protected]"
] | |
3d6d4a55f752a49e7a3f5a20221fd3fd424424ea | f6d5896526ff295cd9619b449c67e7dcfba5ef6b | /CodeWars/Python/sum_of_positive.py | 8710e12269130572ab7c07a3674a008fdc16f7d4 | [] | no_license | cmondorf/Code-dojo | 677bd6ff8613e17f9d119fe60310a5575cae392d | 0afdb764225c82a31140981b11f7f259dc25f355 | refs/heads/master | 2022-05-05T21:15:58.236613 | 2022-04-30T06:31:13 | 2022-04-30T06:31:13 | 64,531,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | # sum of positive array items
def positive_sum(arr):
    sum = 0  # note: shadows the built-in sum() inside this function
    for element in arr:
        if element > 0:
            sum += element
return sum
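# Usage sketch (added): positive_sum([1, -4, 7, 12]) returns 20; an empty
# list returns 0.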
| [
"[email protected]"
] | |
767e6a87c329adf36582637a53599d9264249f67 | 54dda0bde0546125735aa28f5fb44eabd4a90457 | /redact_pdf.py | c07d78ee53a3b54b44dfe546fd49c6893c10189b | [] | no_license | jcausey-astate/redact_pdf | 2fe78964d88248c7ea40d76866e09adc4c466ca5 | b93ad37081e3bc81fdb0f449ebe0518deabfaa13 | refs/heads/master | 2020-04-09T03:30:32.992942 | 2013-10-03T15:07:09 | 2013-10-03T15:07:09 | 13,299,770 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,119 | py | ## ###########################################################################################
# Redact a PDF by applying a redaction mask (also a PDF file) to the specified pages.
# Default is to save output back over the input file, and to apply the redaction mask to
# the first page only. This can be changed in options. Also, if redaction mask is multi-
# paged, it will be applied page-for-page to the input file.
#
# IMPORTANT SECURITY NOTE:
# The redaction performed here will be secure only if the underlying data is image
# data - NOT if the underlying data is PDF text. This software is designed to be
# used to redact images from scanned documents, not for PDF files generated directly
# from software. PDF text that is under the redaction mark will STILL BE RECOVERABLE,
# SEARCHABLE, AND SELECTABLE.
#
# usage: redact.py [opts] inputfile redactionmask [outputfile]
# opts:
# -h show usage
# -v --verbose show more output
# -p --page page(s) to apply mask to (default is page 1 only)
# -a --all apply mask to all pages
# note:
# The -a and -p options are meaningless if the redactionmask has multiple pages.
# If that is the case, pages from the redactionmask will be applied to the input
# on a page-by-page basis (until one file or the other runs out of pages).
#
# License: Copyright (c) 2013 Jason L Causey,
# Distributed under the MIT License (MIT):
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ############################################################################################
from PyPDF2 import PdfFileWriter, PdfFileReader
import argparse, os, uuid
verbose = False # Global verbosity indicator
def rangeexpand(txt):
"""
List range expansion function
(found at http://rosettacode.org/wiki/Range_expansion#Python )
"""
lst = []
for r in txt.split(','):
if '-' in r[1:]:
r0, r1 = r[1:].split('-', 1)
lst += range(int(r[0] + r0), int(r1) + 1)
else:
lst.append(int(r))
return lst
def vprint(msg):
"""
Print message to the screen only if verbose mode is activated.
"""
global verbose
if(verbose):
print(msg)
# Set up the argument parser
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true", default=False,
help="show more output")
pagegroup = parser.add_mutually_exclusive_group()
pagegroup.add_argument("-p", "--pages", type=str, default="1",
help="List or range of pages (ex: 1,4-6 would redact page 1 and 4 through 6).")
pagegroup.add_argument("-a", "--all", action="store_true",
help="redact all pages")
parser.add_argument("inputfile", type=str,
help="input PDF file")
parser.add_argument("redactionmask", type=str,
help="PDF file containing the redaction mask")
parser.add_argument("outputfile", type=str, nargs="?", default="",
help="output file name (default is to overwrite input file)")
# Get incoming options and open files:
args = parser.parse_args()
inputfile = args.inputfile
pdfout = PdfFileWriter()
input_stream = open(args.inputfile, "rb")
pdfin = PdfFileReader(input_stream)
redaction_mask = PdfFileReader(open(args.redactionmask, "rb"))
redact_multipage = True if redaction_mask.getNumPages() > 1 else False
outputfile = args.inputfile if args.outputfile == "" else args.outputfile
verbose = True if args.verbose else False
# If the input file == the output file, PyPDF2 has an issue where you can't actually overwrite
# on-the-fly, (it seems to do a lazy-read of the files that contribute to the output document)
# so generate a temporary output file name:
overwrite_input_file = False
if(inputfile == outputfile):
# Generated by appending a UUID to the end of the filename
outputfile = outputfile + "." + str(uuid.uuid4())
overwrite_input_file = True
# Determine list of pages to redact:
if(not args.all):
redact_pages = map(lambda x: x - 1, rangeexpand(args.pages))
else:
redact_pages = range(pdfin.getNumPages())
# If the redactionmask file has multiple pages, it will determine which pages
# we redact:
multi_page_mask = False
if(redaction_mask.getNumPages() > 1):
vprint("Applying multi-page redaction mask from " + args.redactionmask)
redact_pages = range(min(pdfin.getNumPages(), redaction_mask.getNumPages()))
multi_page_mask = True
redact_pages.sort()
redact_pages = filter(lambda x: x < pdfin.getNumPages(), redact_pages)
vprint("Input file: " + inputfile + " - " + str(pdfin.getNumPages()) + " pages.")
vprint("Redacting pages: " + str(map(lambda x: x + 1, redact_pages)))
# Process the input file (only if it has more than 0 pages):
if(pdfin.getNumPages() > 0):
# Copy over every page of the input document:
for i in range(pdfin.getNumPages()):
pdfout.addPage(pdfin.getPage(i))
# If redaction should happen on this page, apply it:
if(len(redact_pages) > 0 and redact_pages[0] == i):
redact_pages.pop(0)
if(not multi_page_mask):
pdfout.getPage(i).mergePage(redaction_mask.getPage(0)) # Redact from single-page mask
else:
pdfout.getPage(i).mergePage(redaction_mask.getPage(i)) # Redact from multi-page mask
# finally, write "pdfout" to output file name
    output_stream = open(outputfile, "wb")
pdfout.write(output_stream)
output_stream.close()
del pdfout
del pdfin
input_stream.close()
# If we are overwriting the input file, move the temporary output file now:
if(overwrite_input_file):
os.rename(outputfile, inputfile)
# Finished!
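# The whole script hinges on PyPDF2's mergePage(), which stamps one page's
# content stream on top of another. A minimal standalone sketch of that core
# step, assuming the same legacy PyPDF2 1.x API used above (the file names
# here are illustrative, not files shipped with this script):
def overlay_mask_sketch(src_path, mask_path, out_path):
    from PyPDF2 import PdfFileReader, PdfFileWriter
    writer = PdfFileWriter()
    source = PdfFileReader(open(src_path, "rb"))
    mask = PdfFileReader(open(mask_path, "rb"))
    for n in range(source.getNumPages()):
        page = source.getPage(n)
        page.mergePage(mask.getPage(0))  # stamp the mask over every page
        writer.addPage(page)
    with open(out_path, "wb") as out:
        writer.write(out)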
169b582234ba2aeb59158e40330fe32c2718d051 | e086dc8b0f2afcc6fe3cec65943cd73a9c4b9d45 | /locallibrary/catalog/tests/test_forms.py | 69640442bb71764c3ebf880636d1198f61aa025a | [] | no_license | huyen99/mysite | 6e6e2c15784016a62370a9974785a61a3a17beab | 74f832c5a87bf4752a694c7d02df296119af06f7 | refs/heads/master | 2023-07-08T08:48:57.846318 | 2021-08-11T07:19:14 | 2021-08-11T07:19:14 | 390,216,323 | 0 | 0 | null | 2021-08-11T07:19:15 | 2021-07-28T04:46:48 | Python | UTF-8 | Python | false | false | 1,410 | py | import datetime
from django.test import TestCase
from django.utils import timezone
from catalog.forms import RenewBookForm
class RenewBookFormTest(TestCase):
def test_renew_form_date_field_label(self):
form = RenewBookForm()
self.assertTrue(form.fields['renewal_date'].label is None or form.fields['renewal_date'].label == 'renewal date')
def test_renew_form_date_field_help_text(self):
form = RenewBookForm()
self.assertEqual(form.fields['renewal_date'].help_text, 'Enter a date between now and 4 weeks (default 3).')
def test_renew_form_date_in_past(self):
date = datetime.date.today() - datetime.timedelta(days=1)
form = RenewBookForm(data={'renewal_date': date})
self.assertFalse(form.is_valid())
def test_renew_form_date_too_far_in_future(self):
date = datetime.date.today() + datetime.timedelta(weeks=4) + datetime.timedelta(days=1)
form = RenewBookForm(data={'renewal_date': date})
self.assertFalse(form.is_valid())
def test_renew_form_date_today(self):
date = datetime.date.today()
form = RenewBookForm(data={'renewal_date': date})
self.assertTrue(form.is_valid())
def test_renew_form_date_max(self):
date = timezone.localtime() + datetime.timedelta(weeks=4)
form = RenewBookForm(data={'renewal_date': date})
self.assertTrue(form.is_valid())
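# The form module itself is not part of this file; a minimal RenewBookForm
# consistent with the assertions above (field name, help text, and the
# now-to-four-weeks validation window) might look like this sketch:
import datetime
from django import forms
from django.core.exceptions import ValidationError
class RenewBookForm(forms.Form):
    renewal_date = forms.DateField(
        help_text='Enter a date between now and 4 weeks (default 3).')
    def clean_renewal_date(self):
        data = self.cleaned_data['renewal_date']
        if data < datetime.date.today():
            raise ValidationError('Invalid date - renewal in past')
        if data > datetime.date.today() + datetime.timedelta(weeks=4):
            raise ValidationError('Invalid date - renewal more than 4 weeks ahead')
        return data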
99557f8f1c92e921fe2ff9b5e5bb6c8b2689b248 | 6fe664a678625885391c621c8c16cbf61a921cd6 | /br_scraper/src/db.py | 40441214245ba72cb3e28e5680b482f4dde4adeb | [] | no_license | Bralor/bezrealitky_scraper | 1b7293ee9078f628e7b3bd26b522642c5290ee2c | 054c86bd10bbcce682df88d6c037f79f5a0a7298 | refs/heads/master | 2023-07-02T16:56:08.595470 | 2021-08-09T17:45:26 | 2021-08-09T17:45:26 | 380,360,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,219 | py | import pymongo
class DbWrapper():
def __init__(self, hostname: str, port: int, db_name: str,
col_name: str, data: list) -> None:
self.hostname = hostname
self.port = port
self.db_name = db_name
self.col_name = col_name
if not data:
raise Exception("There are no data to insert")
else:
self.data = data
def connect_db(self) -> None:
try:
self.client = pymongo.MongoClient(
self.hostname,
self.port
)
except pymongo.errors.ServerSelectionTimeoutError as err:
raise Exception(f"Cannot connect to the db ({err})")
else:
if not self.db_exists() and not self.col_exists():
self.create_collection()
else:
self.db = self.client[self.db_name]
self.collection = self.db[self.col_name]
print("Using existing database and collection")
def db_exists(self) -> bool:
return self.db_name in self.client.list_database_names()
def col_exists(self) -> bool:
return self.col_name in self.client[self.db_name].list_collection_names()
def create_collection(self) -> None:
self.db = self.client[self.db_name]
self.collection = self.db[self.col_name]
print(f"Collection {self.col_name} created")
def add_indexing(self, index_name: str) -> None:
self.collection.create_index(
[(index_name, pymongo.ASCENDING)],
unique=True
)
def write_documents(self, data: list) -> None:
        if not isinstance(data, list) or not data:
            raise Exception("Argument 'data' is not a list or is empty")
for document in data:
try:
self.collection.insert_one(document)
except pymongo.errors.DuplicateKeyError:
print("The duplicate index occured, continue..")
def read_documents(self) -> list:
        if getattr(self, 'collection', None) is None:  # pymongo collections forbid truth-value testing
            raise Exception("Argument 'collection' is empty")
return [
document
for document in self.collection.find({})
]
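# A short usage sketch for the wrapper above, assuming a MongoDB server is
# reachable on localhost:27017 (database/collection names are illustrative):
if __name__ == "__main__":
    docs = [{"_id": 1, "name": "alpha"}, {"_id": 2, "name": "beta"}]
    wrapper = DbWrapper("localhost", 27017, "demo_db", "demo_col", docs)
    wrapper.connect_db()
    wrapper.add_indexing("name")           # unique ascending index on 'name'
    wrapper.write_documents(wrapper.data)  # duplicate keys are skipped, not fatal
    print(wrapper.read_documents())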
6b8b8673351f44ae0d096eb7c11b5cab78dafa55 | 95b57cb90ea0625ede16679b0a6a324342c1ec28 | /stars/apps/api/wishlists/urls.py | e9dea5c23a9a266486d864516a41d97e3fec33d7 | [] | no_license | lisongwei15931/stars | 2814f5cc9d08dd26a25048f91b27ff1607a659cb | 3d6198c2a1abc97fa9286408f52c1f5153883b7a | refs/heads/master | 2022-11-27T07:08:52.048491 | 2016-03-18T09:33:55 | 2016-03-18T09:33:55 | 54,242,594 | 0 | 0 | null | 2022-11-22T00:36:28 | 2016-03-19T02:10:35 | Python | UTF-8 | Python | false | false | 324 | py | # -*- coding: utf-8 -*-
from django.conf.urls import url
from stars.apps.api.wishlists.views import AppMyFavProduct, AppMyFavListView
urlpatterns = (
url(r'^list/$', AppMyFavListView.as_view(), name='api-myfav-list'),
url(r'product/(?P<product_pk>\d+)/$', AppMyFavProduct.as_view(), name='api-myfav-product'),
)
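# A quick sketch of resolving these named routes elsewhere in the project
# (the import path varies by Django version, and the resulting URL depends on
# where this urlconf is included):
#     from django.urls import reverse  # django.core.urlresolvers on older Django
#     reverse('api-myfav-list')
#     reverse('api-myfav-product', kwargs={'product_pk': 7})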
dc0f7a69c5550004b0d73e7b5a648ba9ee3fbacc | 1c861897966a33934d42bfe9abc33f36c4459a58 | /deleter.py | 843f4d447ab448d28103c108ea2d21b22ef331a6 | [] | no_license | zackseliger/Insta-Bot | 79497c7c5a3a6fe9efb1e3a174eb0385759c5a18 | b81a657ce550b5fbc57a8d707d25738003a2982a | refs/heads/master | 2021-01-01T10:48:58.879814 | 2020-07-25T06:19:30 | 2020-07-25T06:19:30 | 239,245,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,212 | py | from Manager import Manager
from Account import Account
from time import sleep
# create manager and account
manager = Manager()
account = Account('accounts/test.acc')
# run account
manager.openAccount(account)
sleep(10)
manager.browser.browser.get('https://instagram.com/'+account.username)
manager.browser.save_cookies(account.cookiesPath)
# RUN THE CODE
manager.browser.browser.execute_script('''(async function(){
/* utility */
function getCookie(val) {
let result = "";
document.cookie.split(';').some(item => {
itemArray = item.split('=');
if (itemArray[0] === val) result = itemArray[1]
})
return result;
}
/* get date ranges */
startDate = Date.parse(prompt("Enter start date (oldest date) of posts to delete (yyyy-mm-dd format)"))/1000;
endDate = Date.parse(prompt("Enter most recent date (yyyy-mm-dd format)"))/1000;
if (endDate-startDate < 0) {
alert("most recent date cannot be before oldest date!");
return;
}
if (isNaN(startDate)) {
alert("you didn't enter anything in for the start date...");
return;
}
/* grab all the posts */
p = [];
added = true;
while (added === true) {
added = false;
aTags = document.getElementsByTagName('a');
for (let i = 0; i < aTags.length; i++) {
if (aTags[i].href.indexOf('/p/') !== -1) {
postId = aTags[i].href.substring(aTags[i].href.indexOf('/p/')+3, aTags[i].href.length-1)
if (p.indexOf(postId) === -1) {
p.push(postId);
added = true;
}
}
}
window.scrollBy(0,1000);
await new Promise(r=>setTimeout(r,500));
window.scrollBy(0, 100);
await new Promise(r=>setTimeout(r,1000));
}
console.log(p);
console.log('start: '+startDate);
console.log('end: '+endDate);
	/* get info for each post and delete it if it falls within the range */
for (let i = 0; i < p.length; i++) {
if (p[i].indexOf('/') !== -1) continue;
thingthatmessedup = "";
fetch('https://instagram.com/p/'+p[i]+'?__a=1')
.then(res => res.text())
.then(response => {
thingthatmessedup = response;
response = JSON.parse(response);
id = response.graphql.shortcode_media.id;
timestamp = response.graphql.shortcode_media.taken_at_timestamp;
if (startDate < timestamp && startDate+(endDate-startDate) > timestamp) {
console.log("delete "+id+" at timestamp "+timestamp)
fetch('https://www.instagram.com/create/'+id+'/delete/', {
method: 'POST',
credentials: 'include',
headers: {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Host': 'www.instagram.com',
'Origin': 'https://www.instagram.com',
'TE': 'Trailers',
'Content-Length': '0',
'Content-Type': 'application/x-www-form-urlencoded',
'X-IG-App-ID': '1217981644879628',
'X-Requested-With': 'XMLHttpRequest',
'X-Instagram-AJAX': '62d0c4ff7fec',
'X-CSRFToken': getCookie('csrftoken'),
'X-IG-WWW-Claim': getCookie('x-ig-set-www-claim')||'0'
}
})
.then(res => res.text())
.then(res => console.log(res))
.catch(err => {alert("error deleting: "+err)})
}
})
.catch(err => { alert("error: "+err);console.log(thingthatmessedup) })
await new Promise(r=>setTimeout(r,100));
}
alert("should be done deleting!");
})()''')
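# The injected JavaScript compares each post's taken_at_timestamp against an
# epoch-second window built with Date.parse(). A rough Python-side sketch of
# the same window (Date.parse treats bare dates as UTC while strptime uses
# local time, so the two can differ by the timezone offset):
from datetime import datetime
def epoch_window(start_str, end_str):
    start = datetime.strptime(start_str, "%Y-%m-%d").timestamp()
    end = datetime.strptime(end_str, "%Y-%m-%d").timestamp()
    if end < start:
        raise ValueError("most recent date cannot be before oldest date")
    return start, end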
11173608354abf6a88f53152b6b7f1d628d87afe | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Pygame Tutorials/examples/steering.py | 304d68a6b4c11dccbabc301fe733570a646282bf | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4721637f392de26806bdc6150291bb24ce5c59aa4dfc8dcd866c9e28b0841d70
size 4685
5ef0d67c25609f004301903bd5332bdf4b049348 | 0371ca2b176f3227290dac292531137be9b001de | /IntervalosDeTempo.py | 52420808a09fc8ee5854c572c5d9a9151748ca25 | [] | no_license | DiolanGodinho/EmprestimoDeBicicletas | 68006d72a24fe8de086cddfd1c938801680a6460 | 2501ea28083a472f5bb020e21ce55f9917efdcbf | refs/heads/main | 2023-07-17T11:58:01.212767 | 2021-08-30T17:09:48 | 2021-08-30T17:09:48 | 399,659,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | from datetime import timedelta
temposDeEmprestimo = [
    # Time intervals used to simulate bicycle loans.
timedelta(days=2, hours=1, minutes= 50),
timedelta(days=3, hours=6, minutes= 40),
timedelta(days=4, hours=11, minutes= 30),
timedelta(days=5, hours=16, minutes= 20),
timedelta(days=6, hours=21, minutes= 10),
timedelta(weeks=2, days=2, hours=2, minutes= 10),
timedelta(weeks=2, days=3, hours=5, minutes= 10),
timedelta(weeks=2, days=4, hours=8, minutes= 10),
timedelta(weeks=1, days=5, hours=11, minutes= 20),
timedelta(weeks=1, days=6, hours=14, minutes= 20),
timedelta(weeks=1, days=1, hours=17, minutes= 20),
timedelta(weeks=2, days=2, hours=20, minutes= 30),
timedelta(weeks=2, days=3, hours=23, minutes= 30),
timedelta(weeks=2, days=4, hours=2, minutes= 30),
timedelta(weeks=1, days=5, hours=5, minutes= 40),
timedelta(weeks=1, days=6, hours=8, minutes= 40),
timedelta(weeks=1, days=1, hours=11, minutes= 40),
timedelta(weeks=2, days=2, hours=14, minutes= 50),
timedelta(weeks=2, days=3, hours=17, minutes= 50)
]
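# A sketch of how these intervals might be consumed in the loan simulation:
# adding each timedelta to a start datetime yields the corresponding due date.
from datetime import datetime
if __name__ == "__main__":
    start = datetime(2021, 8, 30, 12, 0)  # illustrative loan start time
    for interval in temposDeEmprestimo[:3]:
        print(start + interval)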
7988d7e41bcb911d2ec0b1a79c1f4557077c580b | 9ae6380635a3308a8ae60945d02afa84c6435feb | /setup.py | 6b323b982686c610e862672c0b786be5fb854e2e | [
"BSD-2-Clause"
] | permissive | kevinbennett/django-url-utils | 64f28c01312babbf02eaa6a43bb507d1e5bc73db | d49654d12a9e230189c49478630548a0562795ca | refs/heads/master | 2020-06-28T19:36:28.740688 | 2013-05-31T11:03:11 | 2013-05-31T11:03:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,046 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import re
import os
import sys
name = 'django-url-utils'
package = 'url_utils'
description = 'Django template tags for manipulating URLs'
url = 'http://github.com/brightinteractive/django-url-utils/'
author = 'Bright Interactive'
author_email = '[email protected]'
license = 'BSD'
install_requires = ["Django >= 1.3",]
def get_version(package):
"""
    Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
args = {'version': get_version(package)}
print "You probably want to also tag the version now:"
print " git tag -a v%(version)s -m 'Version %(version)s'" % args
print " git push --tags"
sys.exit()
setup(
name=name,
version=get_version(package),
url=url,
license=license,
description=description,
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
install_requires=install_requires
)
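# For a layout such as url_utils/__init__.py plus a url_utils/templatetags/
# sub-package, the helpers above behave roughly as follows (a sketch, not
# verified output):
#     get_version('url_utils')       # reads __version__ from url_utils/__init__.py
#     get_packages('url_utils')      # ['url_utils', 'url_utils/templatetags', ...]
#     get_package_data('url_utils')  # {'url_utils': [paths of non-package data files]}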
ebf8f5ade339bfb5d20cea85361876424f1ac995 | e2ae5c6d1d3ff9c512d526b1b4d7d7b64d50e87d | /py/leetcode/462.py | 1d4d727740ec0c4564734565c5d9d36269ff7c4c | [] | no_license | wfeng1991/learnpy | 59ed66d0abc2947c2f73c0bfe3901ef45ba5eb56 | e5b018493bbd12edcdcd0434f35d9c358106d391 | refs/heads/master | 2021-01-23T07:35:08.376547 | 2018-09-28T02:16:31 | 2018-09-28T02:16:31 | 86,430,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | class Solution(object):
def minMoves21(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# from functools import reduce
# sumv=reduce(lambda x,y:x+y,nums)
m=float('inf')
for n in nums:
t=0
for i in nums:
if i!=n:
t+=abs(n-i)
m=min(m,t)
return m
def minMoves2(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        median = sorted(nums)[len(nums) // 2]  # integer index; works on both Python 2 and 3
return sum(abs(num - median) for num in nums)
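# Why the second method works: the median minimizes the sum of absolute
# differences, so minMoves2 is O(n log n) while minMoves21 brute-forces every
# element as the meeting point in O(n^2). A tiny cross-check of the two
# implementations (any small input would do):
if __name__ == "__main__":
    s = Solution()
    assert s.minMoves2([1, 10, 2, 9]) == s.minMoves21([1, 10, 2, 9]) == 16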
0a296b5ad1b1dcedf0f5d1bbe9c425e3fee72bdf | 7aed4029c9a8149aad133df44d6abfd306bcd70a | /2018/day13/part1.py | 0ff06126bac8201744954edc6fe82d44b1803849 | [] | no_license | janosgyerik/advent-of-code | b669902aa9efc0f8fded1012932602d9ccb693e7 | dd12d0c31d6766abccaeebc5474f0855df66f561 | refs/heads/master | 2023-04-01T19:33:21.500017 | 2020-12-24T06:22:01 | 2020-12-24T06:22:01 | 160,064,435 | 0 | 0 | null | 2023-03-30T06:51:35 | 2018-12-02T16:02:21 | Python | UTF-8 | Python | false | false | 5,262 | py | #!/usr/bin/env python
import sys
from collections import deque
class Graph:
def __init__(self):
self.carts = []
self.lines = []
def add_line(self, line):
self.lines.append(list(line))
def add_cart(self, cart):
self.carts.append(cart)
def move_carts(self):
sorted_carts = sorted(self.carts, key=lambda c: (c.pos.y, c.pos.x))
taken = set(cart.pos for cart in self.carts)
for cart in sorted_carts:
old_pos = cart.pos
cart.pos += cart.direction
if cart.pos in taken:
return cart.pos
taken.add(cart.pos)
taken.remove(old_pos)
            # The curve and intersection rules are identical for every heading,
            # so use lookup tables instead of four near-identical branches.
            track = self.at(cart.pos)
            if track == '\\':
                cart.direction = {D_LEFT: D_UP, D_RIGHT: D_DOWN,
                                  D_UP: D_LEFT, D_DOWN: D_RIGHT}[cart.direction]
            elif track == '/':
                cart.direction = {D_LEFT: D_DOWN, D_RIGHT: D_UP,
                                  D_UP: D_RIGHT, D_DOWN: D_LEFT}[cart.direction]
            elif track == '+':
                cart.turn()
            elif track not in '-|':
                raise ValueError('Unexpected position: ' + track)
def at(self, pos):
return self.lines[pos.y][pos.x]
class Pos:
def __init__(self, x, y):
self.x = x
self.y = y
def __add__(self, other):
return Pos(self.x + other.x, self.y + other.y)
def __hash__(self):
return self.x * 13 + self.y
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __str__(self):
return '({}, {})'.format(self.x, self.y)
def __repr__(self):
return self.__str__()
class Cart:
def __init__(self, x, y, c):
self.pos = Pos(x, y)
self.direction = self.parse_direction(c)
self.directions = deque([D_LEFT, Pos(0, 0), D_RIGHT])
def parse_direction(self, c):
if c == '<':
return D_LEFT
if c == '>':
return D_RIGHT
if c == 'v':
return D_DOWN
if c == '^':
return D_UP
raise ValueError('Unexpected direction: ' + c)
def turn(self):
if self.directions[0] == D_LEFT:
if self.direction == D_LEFT:
self.direction = D_DOWN
elif self.direction == D_RIGHT:
self.direction = D_UP
elif self.direction == D_UP:
self.direction = D_LEFT
elif self.direction == D_DOWN:
self.direction = D_RIGHT
else:
raise ValueError('Unexpected cart direction: ' + self.direction)
elif self.directions[0] == D_RIGHT:
if self.direction == D_LEFT:
self.direction = D_UP
elif self.direction == D_RIGHT:
self.direction = D_DOWN
elif self.direction == D_UP:
self.direction = D_RIGHT
elif self.direction == D_DOWN:
self.direction = D_LEFT
else:
raise ValueError('Unexpected cart direction: ' + self.direction)
self.directions.rotate(-1)
def __str__(self):
return '{} {} {}'.format(self.pos, self.direction, self.directions)
def parse_input():
g = Graph()
for y, line in enumerate(sys.stdin.readlines()):
g.add_line(line.replace('v', '|').replace('^', '|').replace('<', '-').replace('>', '-'))
for x, c in enumerate(line):
if c in '<>v^':
cart = Cart(x, y, c)
g.add_cart(cart)
return g
D_UP = Pos(0, -1)
D_DOWN = Pos(0, 1)
D_LEFT = Pos(-1, 0)
D_RIGHT = Pos(1, 0)
if __name__ == '__main__':
g = parse_input()
for i in range(1000):
pos = g.move_carts()
if pos:
print(i, pos, g.lines[pos.y][pos.x])
break
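# At a '+' intersection each cart cycles left turn -> straight -> right turn,
# tracked by the rotating deque in Cart.directions. A small sketch of that
# cycle, assuming the classes above are in scope:
def _demo_turn_cycle():
    c = Cart(0, 0, '>')     # initial heading: right
    for _ in range(4):
        c.turn()
        print(c.direction)  # prints (0, -1), (0, -1), (1, 0), (0, -1)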
b167ff6601ea583fec0edfa4ff1464a3e3ce9178 | 30ac20df59064a40ef19d2fec909a56c6c138d13 | /archive/src V2/test.py | 7bc5725eed7772e5a854da01822f2e51c1a18117 | [] | no_license | stevenleigh/sib | 02c51ddd12aefbbb8b6d319a3cbb3c520cc79d50 | 9a29a2866041b55048f7f1d37ff488ccd28bf660 | refs/heads/master | 2016-09-06T14:06:20.126620 | 2014-06-11T14:00:11 | 2014-06-11T14:00:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,639 | py |
from file_blob import file_blob
from commit_blob import commit_blob
from local_blob_manager import local_blob_manager
import time
from multiprocessing import Process
import os
#from peer_manager import peer_manager
import peer_service
import xmlrpclib
import random
import logging
import shutil
logging.basicConfig(
filename='test.log',
filemode='w',
format='%(asctime)s | %(process)d | %(processName)s | %(levelname)s | %(module)s | %(funcName)s | %(message)s',
level=logging.DEBUG)
logging.debug('testing started')
key=b'Sixteen byte key'
peer_A_storage = '../resource/peer_A_storage' #simulated local
peer_B_storage = '../resource/peer_B_storage' #simulated remote peer
peer_C_storage = '../resource/peer_C_storage' #simulated remote peer
#empty the storage directory
for root, dirs, files in os.walk(peer_A_storage, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
#empty the storage directory
for root, dirs, files in os.walk(peer_B_storage, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
#empty the storage directory
for root, dirs, files in os.walk(peer_C_storage, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.mkdir(os.path.join(peer_A_storage, 'test_share'))
print '\n\n'
print '************************************************************************'
print '***Testing initializing a file blob'
logging.debug('Testing initializing a file blob')
print '************************************************************************'
#open a text file and import into file blob
f=open('../resource/sample_text_1.txt','rb')
fb=file_blob()
fb.my_hash='1'
fb.display()
fb.compute_delta(key,f.read())
fb.display()
print '\n\n'
print '************************************************************************'
print '***Testing a simple file delta'
logging.debug('Testing a simple file delta')
print '************************************************************************'
#open a 2nd version of text file and compute delta from first version
f2=open('../resource/sample_text_2.txt','rb')
fb2=file_blob()
fb2.my_hash='2'
fb2.display()
fb2.compute_delta(key, f2.read(), fb, os.path.join(peer_A_storage, 'test_share'))
fb2.display()
fb.display()
print '\n\n'
print '************************************************************************'
print '***Testing storing and loading a simple file blob'
logging.debug('Testing storing and loading a simple file blob')
print '************************************************************************'
#encrypt and store the first file blob, then decrypt and load
fb_hash = fb.store(key, os.path.join(peer_A_storage, 'test_share'))
fb3=file_blob()
fb3.my_hash='3'
fb3.load(key, os.path.join(peer_A_storage, 'test_share'), fb_hash)
fb3.display()
print '\n\n'
print '************************************************************************'
print '***Testing loading a whole directory as an initial commit'
logging.debug('Testing loading a whole directory as an initial commit')
print '************************************************************************'
#load a whole directory as an initial commit
bm=local_blob_manager()
commit_hash_1 = bm.commit_directory(key, '../resource/test_directory_1/root',
os.path.join(peer_A_storage, 'test_share'), 'joe.keur', 'first commit msg')
bm.restore_directory(key,'../resource/restore_directory_1', os.path.join(peer_A_storage, 'test_share'),
commit_hash_1)
print '\n\n'
print '************************************************************************'
print '***Testing adding a second commit'
logging.debug('Testing adding a second commit')
print '************************************************************************'
bm=local_blob_manager()
commit_hash_2 = bm.commit_directory(key, '../resource/test_directory_2/root',
os.path.join(peer_A_storage, 'test_share'),'joe.keur','second commit msg',commit_hash_1)
bm.restore_directory(key,'../resource/restore_directory_2', os.path.join(peer_A_storage, 'test_share'), commit_hash_2)
print '\n\n'
print '************************************************************************'
print '***Testing adding a third, more challenging, commit'
logging.debug('Testing adding a third, more challenging, commit')
print '************************************************************************'
bm=local_blob_manager()
commit_hash_3 = bm.commit_directory(key, '../resource/test_directory_3/root',
os.path.join(peer_A_storage, 'test_share'),'joe.keur','third commit msg',commit_hash_2)
bm.restore_directory(key,'../resource/restore_directory_3', os.path.join(peer_A_storage, 'test_share'), commit_hash_3)
print '\n\n'
print '************************************************************************'
print '***Testing network connections'
logging.debug('Testing network connections')
print '************************************************************************'
command_port = random.randint(20000,50000)
peer_B = peer_service.peer_service(command_port)
peer_B_process = Process(target = peer_B.serve_forever)
peer_B_process.start()
time.sleep(0.1) #wait for peer process and socket creation
print 'server start finished'
peer_A = peer_service.peer_service(command_port+1)
peer_B_proxy = peer_A.pm.connect_machine('machine_B','http://localhost:' +str(command_port))
print peer_B_proxy.ping()
time.sleep(0.1)
peer_B_process.terminate()
print '\n\n'
print '************************************************************************'
print '***Testing network blob transfer to peer'
logging.debug('Testing network blob transfer to peer')
print '************************************************************************'
command_port+=2
peer_B = peer_service.peer_service(command_port)
peer_B.pm.storage_directory = peer_B_storage
peer_B.pm.my_machine_ID = 'machine_B'
peer_B.pm.add_share_to_machine('test_share','machine_B')
peer_B_process = Process(target = peer_B.serve_forever)
peer_B_process.start()
time.sleep(0.1) #wait for peer process and socket creation
print 'server start finished'
peer_A = peer_service.peer_service(command_port+1)
peer_A.pm.storage_directory = peer_A_storage
peer_B_proxy = peer_A.pm.connect_machine('machine_B','http://localhost:' +str(command_port))
f.seek(0)  # rewind: compute_delta() above already consumed the file
print peer_B_proxy.save_file('test_share', fb_hash, xmlrpclib.Binary(f.read()))
time.sleep(0.1)
peer_B_process.terminate()
print '\n\n'
print '************************************************************************'
print '***Testing network blob transfer from peer'
logging.debug('Testing network blob transfer from peer')
print '************************************************************************'
command_port+=2
peer_B = peer_service.peer_service(command_port)
peer_B.pm.storage_directory = peer_B_storage
peer_B.pm.my_machine_ID = 'machine_B'
peer_B.pm.add_share_to_machine('test_share','machine_B')
peer_B_process = Process(target = peer_B.serve_forever)
peer_B_process.start()
time.sleep(0.1) #wait for peer process and socket creation
print 'server start finished'
peer_A = peer_service.peer_service(command_port+1)
peer_A.pm.storage_directory = peer_A_storage
peer_B_proxy = peer_A.pm.connect_machine('machine_B','http://localhost:' +str(command_port))
(peer_B_proxy.get_file('test_share', fb_hash)).data
time.sleep(0.1)
peer_B_process.terminate()
print '\n\n'
print '************************************************************************'
print '***Testing large network blob transfer to peer'
logging.debug('Testing large network blob transfer to peer')
print '************************************************************************'
command_port +=2
large_file = open('../resource/alice.txt','rb')
fb=file_blob()
fb.compute_delta(key,large_file.read())
large_file_hash = fb.store(key, peer_A_storage)
peer_B = peer_service.peer_service(command_port)
peer_B.pm.storage_directory = peer_B_storage
peer_B.pm.my_machine_ID = 'machine_B'
peer_B.pm.add_share_to_machine('test_share','machine_B')
peer_B_process = Process(target = peer_B.serve_forever)
peer_B_process.start()
time.sleep(0.1) #wait for peer process and socket creation
print 'server start finished'
peer_A = peer_service.peer_service(command_port+1)
peer_A.pm.storage_directory = peer_A_storage
peer_B_proxy = peer_A.pm.connect_machine('machine_B','http://localhost:' +str(command_port))
large_file.seek(0)  # rewind: compute_delta() above already consumed the file
print peer_B_proxy.save_file('test_share', large_file_hash, xmlrpclib.Binary(large_file.read()))
time.sleep(0.1)
peer_B_process.terminate()
print '\n\n'
print '************************************************************************'
print '***Testing receiving all commits form peer'
logging.debug('Testing receiving all commits form peer')
print '************************************************************************'
command_port+=2
peer_A = peer_service.peer_service(command_port)
peer_A.pm.storage_directory = peer_A_storage
peer_A.pm.my_machine_ID = 'machine_A'
peer_A.pm.add_share_to_machine('test_share','machine_A')
peer_A_process = Process(target = peer_A.serve_forever)
peer_A_process.start()
time.sleep(0.1) #wait for peer process and socket creation
print 'server start finished'
peer_C = peer_service.peer_service(command_port+1)
peer_C.pm.storage_directory = peer_C_storage
peer_C.pm.my_machine_ID = 'machine_C'
peer_C.pm.add_share_to_machine('test_share','machine_C')
peer_A_proxy = peer_C.pm.connect_machine('machine_A','http://localhost:' +str(command_port))
peer_C_process = Process(target = peer_C.serve_forever) #start peer C as a new process so it can receive all commits
peer_C_process.start()
time.sleep(0.1) #wait for peer process and socket creation
print peer_A_proxy.get_all_commits('test_share', 'http://localhost:' +str(command_port+1)) #Returns after peer A transfers all commits to peer C.
time.sleep(0.1)
peer_A_process.terminate()
peer_C_process.terminate()
print '\n\n'
print '************************************************************************'
print '***Testing printing all commit info transfered to peer C'
logging.debug('Testing printing all commit info transfered to peer C')
print '************************************************************************'
cb = commit_blob()
for root, dirs, files in os.walk(peer_C_storage):
for name in files:
if name[0]=='_': #commit filenames start with '_'
cb.load(key, peer_C_storage, name)
cb.display()
print '\n\n'
print '************************************************************************'
print '***Testing collecting all blobs for a given commit'
logging.debug('Testing collecting all blobs for a given commit')
print '************************************************************************'
command_port+=2
peer_A = peer_service.peer_service(command_port)
peer_A.pm.storage_directory = peer_A_storage
peer_A.pm.my_machine_ID = 'machine_A'
peer_A.pm.add_share_to_machine('test_share','machine_A')
peer_A_process = Process(target = peer_A.serve_forever)
peer_A_process.start()
time.sleep(0.1) #wait for peer process and socket creation
print 'server start finished'
peer_C = peer_service.peer_service(command_port+1)
peer_C.pm.storage_directory = peer_C_storage
peer_C.pm.my_machine_ID = 'machine_C'
peer_C.pm.add_share_to_machine('test_share','machine_A')
peer_C.pm.add_share_to_machine('test_share','machine_C')
peer_C.pm.connect_machine('machine_A','http://localhost:' +str(command_port))
peer_C.pm.collect_commit_dependencies(key, '_'+commit_hash_3, 'test_share')
time.sleep(0.1)
peer_A_process.terminate()
print '\n\n'
print '************************************************************************'
print '***Testing sending a full directory'
print '************************************************************************'
print '\n\n'
print '************************************************************************'
print '***Testing recieving a full directory'
print '************************************************************************'
print '\n\n'
print '************************************************************************'
print '***Testing sending a directory update'
logging.debug('Testing sending a directory update')
print '************************************************************************'
command_port+=2
peer_C = peer_service.peer_service(command_port)
peer_C.pm.storage_directory = peer_C_storage
peer_C.pm.my_machine_ID = 'machine_C'
peer_C.pm.add_share_to_machine('test_share','machine_C')
peer_C_process = Process(target = peer_C.serve_forever)
peer_C_process.start()
time.sleep(0.1) #wait for peer process and socket creation
print 'server start finished'
peer_A = peer_service.peer_service(command_port+1)
peer_A.pm.storage_directory = peer_A_storage
peer_A.pm.my_machine_ID = 'machine_A'
peer_A.pm.add_share_to_machine('test_share','machine_C')
peer_A.pm.connect_machine('machine_C','http://localhost:' +str(command_port))
peer_A.pm.push_update_to_peer('test_share', 'machine_C')
time.sleep(0.1)
peer_C_process.terminate()
print '\n\n'
print '************************************************************************'
print '***Testing receiving a directory update'
logging.debug('Testing receiving a directory update')
print '************************************************************************'
print '\n\n'
print '************************************************************************'
print '***Test saving and loading peer connection info'
logging.debug('Test saving and loading peer connection info')
print '************************************************************************'
command_port+=2
peer_C = peer_service.peer_service(command_port)
peer_C.pm.storage_directory = peer_C_storage
peer_C.pm.my_machine_ID = 'machine_C'
peer_C.pm.add_share_to_machine('test_share','machine_C')
peer_C.pm.add_share_to_machine('test_share','machine_A')
peer_C_process = Process(target = peer_C.serve_forever)
peer_C_process.start()
time.sleep(0.1) #wait for peer process and socket creation
print 'server start finished'
peer_A = peer_service.peer_service(command_port+1)
peer_A.pm.storage_directory = peer_A_storage
peer_A.pm.my_machine_ID = 'machine_A'
peer_A.pm.add_share_to_machine('test_share','machine_C')
peer_A.pm.add_share_to_machine('test_share','machine_A')
peer_A.pm.connect_machine('machine_C','http://localhost:' +str(command_port))
time.sleep(0.1)
peer_C_process.terminate()
print peer_A.pm.save_peer_info()
peer_A.pm.load_peer_info()
print '\n\n'
print '************************************************************************'
print '***Testing automatic sync'
logging.debug('Testing automatic sync')
print '************************************************************************'
command_port+=2
peer_A = peer_service.peer_service(command_port)
peer_A.pm.storage_directory = peer_A_storage
peer_A.pm.my_machine_ID = 'machine_A'
peer_A.pm.add_share_to_machine('test_share','machine_C')
peer_A.pm.add_share_to_machine('test_share','machine_A')
peer_A_process = Process(target = peer_A.serve_forever)
peer_A_process.start()
time.sleep(0.1) #wait for peer process and socket creation
print 'server start finished'
peer_C = peer_service.peer_service(command_port+1)
peer_C.pm.storage_directory = peer_C_storage
peer_C.pm.my_machine_ID = 'machine_C'
peer_C.pm.add_share_to_machine('test_share','machine_A')
peer_C.pm.add_share_to_machine('test_share','machine_C')
peer_C.pm.connect_machine('machine_A','http://localhost:' +str(command_port))
peer_C.pm.register_auto_sync(key, '../resource/restore_directory_3', 'test_share', 'auto_sync_user')
#do some file operations. A new commit should be created for each one.
time.sleep(0.1)
#copy a file
shutil.copy('../resource/restore_directory_3/root/alice.txt', '../resource/restore_directory_3/root/alice_copy.txt')
time.sleep(0.1)
#edit a file
f_auto = open('../resource/restore_directory_3/root/alice_copy.txt', 'a')
f_auto.write('a bunch of mumbo jumbo. a bunch of mumbo jumbo. a bunch of mumbo jumbo')
f_auto.close()
time.sleep(0.1)
#remove a file
os.remove('../resource/restore_directory_3/root/alice_copy.txt')
time.sleep(0.1)
time.sleep(0.1)
peer_A_process.terminate()
print '\n\n'
print '************************************************************************'
print '***Testing finished'
logging.debug('Testing finished')
print '************************************************************************'
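# Every scenario above repeats the same start/sleep/terminate dance around a
# peer process; a small context manager like this sketch would factor it out:
from contextlib import contextmanager
from multiprocessing import Process
import time
@contextmanager
def running_peer(service, startup_delay=0.1):
    proc = Process(target=service.serve_forever)
    proc.start()
    time.sleep(startup_delay)  # wait for the command socket to come up
    try:
        yield proc
    finally:
        proc.terminate()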
c98bffd08b4c745b73f1be9657c6bfa2855d4ad2 | 76fb247d9c3dddca923556f4280ba3352f7babe4 | /Constructors.py | 66645614b47a7e71ebe50a9b0d3d92ded7ca929e | [] | no_license | 3DRD/Python_basics | 7b0ea7dbbb9eea31f6af8fe2d5fb50a9e5783dcb | 72f8c39070f959557ea7e86e41239ad5b26a5c67 | refs/heads/master | 2022-12-11T00:07:50.184637 | 2020-08-25T15:15:17 | 2020-08-25T15:15:17 | 290,247,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | class Point:
def __init__(self, name):
self.name = name
def talk(self):
print(f"{self.name} Talks")
point1 = Point("DRD")
point1.talk()
67930ac56352aef823f83a76db70e39f80a0bf34 | 4bf1ffdfcd77c526445d59d3349b6069b0517cfb | /identify.py | 2d19b8a899fed1f245f6b2899c72ca27d6488376 | [
"MIT"
] | permissive | StephenApX/BacteriaDetect | 11ac6f227c00947a6c61e0a8186ca3b9ae3f0341 | 69b967391af13922cee3274ad5fb52d7a0836564 | refs/heads/main | 2023-05-26T12:23:53.111612 | 2021-06-11T09:10:59 | 2021-06-11T09:10:59 | 375,795,602 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,410 | py | # coding:utf-8
from os import path, makedirs, listdir
from argparse import ArgumentParser, RawTextHelpFormatter
from datetime import datetime
from copy import deepcopy
import numpy as np
import cv2
from cv2 import imread, imwrite, blur, cvtColor, COLOR_BGR2GRAY, HoughCircles, HOUGH_GRADIENT, rectangle, circle, putText, FONT_HERSHEY_COMPLEX_SMALL
import time
def GammaCorrect_CIETrans(rgblist):
gamma, alpha1, alpha2 = 2.4, 0.055, 1.055
thres = 0.04045
    m_matrix = np.array([[0.4124564,0.3575761,0.1804375],[0.2126729,0.7151522,0.0721750],[0.0193339,0.1191920,0.9503041]],dtype=np.float64)
    gamma_arr = np.zeros((3,1),dtype=np.float64)
'''
Gamma correction:
'''
for i in range(len(rgblist)):
        if float(rgblist[i]) / 255. > thres:  # the sRGB threshold applies to the normalised 0-1 value
gamma_arr[i] = np.power((float(rgblist[i])/255. + alpha1) / alpha2, gamma)
else:
gamma_arr[i] = (float(rgblist[i])/255.) / 12.92
'''
Corrected RGB to XYZ:
'''
XYZ_arr = 100 * np.dot(m_matrix, gamma_arr)
'''
XYZ to xyY:
'''
xyz_s_arr = (1 / (XYZ_arr[0]+XYZ_arr[1]+XYZ_arr[2])) * XYZ_arr
# out_x, out_y, out_Luminance:
return xyz_s_arr[0], xyz_s_arr[1], XYZ_arr[1]
def gamma_streching(img,gamma):
    img = np.array(img/255.0, dtype=np.float64)
img = np.power(img, gamma)
out = np.array(img*255.0, "uint8")
return out
def proceed(params):
in_dir = str(params.in_dir)
    # Honour a caller-supplied output directory; otherwise fall back to a
    # timestamped directory name.
    if params.out_dir:
        out_dir = str(params.out_dir)
    else:
        out_dir = str(datetime.now().strftime("%y-%m-%d_%H-%M-%S"))
if not path.exists(out_dir):
makedirs(out_dir)
for imgfile in listdir(in_dir):
in_path = in_dir + '\\' + str(imgfile)
print('Processing:', in_path)
out_picC_path = out_dir + '\\' + str(imgfile).split('.')[0] + '_Circ.tif'
out_picR_path = out_dir + '\\' + str(imgfile).split('.')[0] + '_Rect.tif'
# out_txtC_path = out_dir + '\\' + str(imgfile).split('.')[0] + '_Circ.txt'
out_txtR_path = out_dir + '\\' + str(imgfile).split('.')[0] + '_Rect.txt'
pic = imread(in_path)
picR = deepcopy(pic)
picC = deepcopy(pic)
pic_arr = np.array(pic, dtype=np.uint8)
'''
Extract Green_band / Mix_band
'''
# picG = deepcopy(pic)
# for i in range(pic.shape[2]):
# picG[:,:,i] = pic_arr[:,:,1]
# Mix Color band with Green and Blue in Red band.
picMix = deepcopy(pic)
picMix[:,:,2] = (pic_arr[:,:,0] * 0.7 + pic_arr[:,:,1] * 0.2)
'''
Process gray_pic for circle detection
'''
# blur_pic = cv2.blur(picG, (10,10))
blur_pic = blur(picMix, (10,10))
# blur_pic = cv2.bilateralFilter(picMix, 10, sigmaSpace = 75, sigmaColor =75)
blur_pic = gamma_streching(blur_pic,0.6)
gray_pic = cvtColor(blur_pic, COLOR_BGR2GRAY)
print(' Conducting HoughCircles.')
circles= HoughCircles(
gray_pic,
HOUGH_GRADIENT,
1,
int(params.min_circle_distance), #50, #min circle distance (pixels)
param1=int(params.edge_detect_thres), #Edge HIGH range param
param2=int(params.roundness_thres), #Roundness param
minRadius=int(params.min_circleRadius), #min circle radius
maxRadius=int(params.max_circleRadius) #max circle radius
)
        if circles is None:  # HoughCircles returns None when no circle is found
            print('    No circles detected, skipping this image.')
            continue
        print('    Num of detected circles: ', len(circles[0]))
time_start = time.time()
i = 1
# out_fileC = open(out_txtC_path, 'w')
# out_fileC.write('ID x y r R G B CIE_x CIE_y CIE_Luminance\n')
out_fileR = open(out_txtR_path, 'w')
out_fileR.write('ID x y r R G B CIE_x CIE_y CIE_Luminance\n')
for circle in circles[0]:
x, y, r = int(circle[0]), int(circle[1]), int(circle[2])
'''
Extract Circle's mean RGB value.
'''
# Fy, Fx = np.ogrid[:pic_arr.shape[0], :pic_arr.shape[1]]
# mask = np.sqrt((Fx-x)*(Fx-x) + (Fy-y)*(Fy-y)) <= r
# '''Method 1'''
# mask = np.where(mask==True,1,0).astype(np.uint8)
# sumb, sumg, sumr = np.sum(np.multiply(pic_arr[:,:,0],mask)), np.sum(np.multiply(pic_arr[:,:,1],mask)), np.sum(np.multiply(pic_arr[:,:,2],mask))
# ave_b, ave_g, ave_r = sumb/np.sum(mask==1), sumg/np.sum(mask==1), sumr/np.sum(mask==1)
# '''Method 2'''
# mask = np.where(mask==True)
# sumarr = np.zeros((3),dtype=np.uint64)
# for j in range(mask[0].shape[0]):
# sumarr = sumarr + pic_arr[mask[0][j],mask[1][j],:]
# Circ_ave_b, Circ_ave_g, Circ_ave_r = sumarr / int(mask[0].shape[0])
# '''Method 3'''
# mask = np.where(mask==True,False,True)
# mask_arr_b, mask_arr_g, mask_arr_r = np.ma.masked_array(pic_arr[:,:,0], mask=mask, fill_value=999999), np.ma.masked_array(pic_arr[:,:,1], mask=mask, fill_value=999999), np.ma.masked_array(pic_arr[:,:,2], mask=mask, fill_value=999999)
# ave_b, ave_g, ave_r = mask_arr_b.mean(), mask_arr_g.mean(), mask_arr_r.mean()
'''
Extract Rectangle's mean RGB value.
'''
rect = pic_arr[y-int(r/2):y+int(r/2),x-int(r/2):x+int(r/2),:]
Rect_ave_b, Rect_ave_g, Rect_ave_r = np.average(rect[:,:,0]),np.average(rect[:,:,1]),np.average(rect[:,:,2])
# print('%i: Circle: R:%.2f, G:%.2f, B:%.2f; Rectangle: R:%.2f, G:%.2f, B:%.2f'%(i, Circ_ave_r, Circ_ave_g, Circ_ave_b, Rect_ave_r, Rect_ave_g, Rect_ave_b))
print(' %i: Rectangle: R:%.2f, G:%.2f, B:%.2f'%(i, Rect_ave_r, Rect_ave_g, Rect_ave_b))
'''
Gamma correction & CIE transfer.
'''
# Circ_CIE_x, Circ_CIE_y, Circ_CIE_Luminance = GammaCorrect_CIETrans([Circ_ave_r, Circ_ave_g, Circ_ave_b])
Rect_CIE_x, Rect_CIE_y, Rect_CIE_Luminance = GammaCorrect_CIETrans([Rect_ave_r, Rect_ave_g, Rect_ave_b])
'''
Write result in txt.
'''
# out_fileC.write('%2d %5d %5d %4d %6.2f %6.2f %6.2f %.2f %.2f %.2f\n'%(i, x, y, r, Circ_ave_r, Circ_ave_g, Circ_ave_b, Circ_CIE_x, Circ_CIE_y, Circ_CIE_Luminance))
out_fileR.write('%2d %5d %5d %4d %6.2f %6.2f %6.2f %.2f %.2f %.2f\n'%(i, x, y, r, Rect_ave_r, Rect_ave_g, Rect_ave_b, Rect_CIE_x, Rect_CIE_y, Rect_CIE_Luminance))
'''
Draw Circle.
'''
cv2.circle(picC, (x,y), r, (0,0,255), 3)
# cv2.putText(pic, '%2d'%(i), (x+int(r/2),y+r), cv2.FONT_HERSHEY_COMPLEX_SMALL, 4, (0, 0, 255))
'''
Draw Rectangle
'''
sx1, sx2 = x-int(r/2), x+int(r/2)
sy1, sy2 = y-int(r/2), y+int(r/2)
cv2.rectangle(picR, (sx1, sy1), (sx2, sy2), (0, 0, 255), 3)
if (sx1 > 10):
# cv2.putText(pic, '%2d: (%.2f, %.2f, %.2f)'%(i, CIE_x, CIE_y, CIE_Luminance), (int(sx1),int(sy1-6)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.2, (255, 255, 255))
putText(picR, '%2d'%(i), (int(sx1),int(sy1-6)), FONT_HERSHEY_COMPLEX_SMALL, 3, (255, 255, 255))
else:
# cv2.putText(pic, '%2d: (%.2f, %.2f, %.2f)'%(i, CIE_x, CIE_y, CIE_Luminance), (int(sx1),int(sy1+15)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.2, (255, 255, 255))
putText(picR, '%2d'%(i), (int(sx1),int(sy1+15)), FONT_HERSHEY_COMPLEX_SMALL, 3, (255, 255, 255))
i += 1
time_end = time.time()
print('Time cost:', (time_end-time_start))
imwrite(out_picC_path, picC)
imwrite(out_picR_path, picR)
def run():
'''
The main function
'''
# Parse parameters
parser = ArgumentParser(
description='Detect Bacteria circles\' location from biochip and output its CIE values in a rectangle area.',
epilog='Developed by xltan, contact me at [email protected]',
formatter_class=RawTextHelpFormatter)
parser.add_argument(
'-i', '--in_dir',
help='input directory of images.',
required=True)
parser.add_argument(
'-o', '--out_dir',
help='output dir of detected images.')
parser.add_argument(
'-d', '--min_circle_distance',
        help='Minimum distance of adjacent circles (pixels).',
type=int,
default=60)
parser.add_argument(
'-e', '--edge_detect_thres',
        help='Contrast threshold between circle edge and background.',
type=int,
default=26)
parser.add_argument(
'-r', '--roundness_thres',
        help='Roundness threshold of circles.',
type=int,
default=31)
parser.add_argument(
'--min_circleRadius',
        help='Minimum circle radius.',
type=int,
default=20)
parser.add_argument(
'--max_circleRadius',
        help='Maximum circle radius.',
type=int,
default=90)
params = parser.parse_args()
proceed(params)
if __name__ == '__main__':
run()
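# A quick numerical sanity check for GammaCorrect_CIETrans (a sketch): sRGB
# white (255, 255, 255) should land on the D65 white point, approximately
# x = 0.3127, y = 0.3290, with luminance Y = 100.
def _check_white_point():
    x, y, Y = GammaCorrect_CIETrans([255, 255, 255])
    print(float(x), float(y), float(Y))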
dcc068eccae45881dc3ac6a2cf4b6ada63fdb4e0 | 9cbc8611a5db7c3da21a77ec2ea104dc3a9446ae | /Les 03/2.Lists & numbers.py | 0ea44fd0237e95abc25946add43271d7b20fe1e6 | [] | no_license | xwolfyxNL/Python-Huiswerk | d70bbe1674b57d13187e98eb5003a4173d87a31c | 5445a4cce0f3fb3608cec5e9a0445975db2cd68c | refs/heads/master | 2020-03-29T09:22:34.488540 | 2018-10-22T10:18:33 | 2018-10-22T10:18:33 | 149,754,944 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | cijfers = [3, 7, -2, 12]
print(abs(min(cijfers) - max(cijfers)))  # spread between the smallest and largest number
9694a496a64eedfdbaec3a322b05e2c6590bc131 | bc709040da3735367414c8f6af1a650424e0cc80 | /exercicios/ex010.py | f3538cca1572d44abcb97fff7b2bca0ea87443ae | [] | no_license | alessandraseitz/Python-3-Mundo-1 | 7c6acd9f4eba7fab2bbf7241ffdd4c0bf07487a9 | e8829d6a383c7da1f4669a8de743f98641740df5 | refs/heads/master | 2022-12-31T23:43:36.996991 | 2020-10-27T00:02:24 | 2020-10-27T00:02:24 | 307,531,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | #Crie um programa que leia quanto dinheiro uma pessoa tem na carteira e mostre quantos dรณlares ela pode comprar. Considere US$1,00 = R$3,27
real = float(input('How much money do you have in your wallet? R$ '))
dolar = real/3.27
print(f'You have U${dolar:.2f} dollars.')
71b376ed2e27b39a986d6602c059d315078ab265 | 96f2c34acf4e2071b174aca01c1371aea78ef409 | /TransitionMat_SingleCell/stablefactors.py | 9902c747fd6eb096619f3317e478cb84960af93a | [] | no_license | neelayjunnarkar/CapstoneModelingProject | fd554ba7cce80ee00d37c1d57469fc2e5805f855 | 088092fd4d591e05871d59fdeb92e7b50dbe2e34 | refs/heads/master | 2020-12-24T13:17:55.398580 | 2016-03-15T18:20:52 | 2016-03-15T18:20:52 | 42,912,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py |
def genstable_e(l, ss, g):
'''
Use to generate a stable e value given values for l, ss, and g
'''
return (-(l-1)*((g-1)*ss+1))/(g)
def genstable_g(l,ss,e):
'''
Use to generate a stable g value given values for l, ss, and e
'''
return (float(l)-1)*(float(ss)-1)/((float(l)-1)*float(ss)+float(e))
def genstable_ss(l,g,e):
'''
Use to generate a stable ss value given values for l, g, and e
'''
return (-(e*g+l-1))/((g-1)*(l-1))
def genstable_l(ss,g,e):
'''
Use to generate a stable l value given values for ss, g, and e
'''
return 1-(float(e)*float(g))/((float(g)-1)*float(ss)+1) | [
"[email protected]"
] | |
211caa9aae58973aba2e7ba89e08c08a42df71b5 | b144c5142226de4e6254e0044a1ca0fcd4c8bbc6 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/vlanrange_afd81185ed8c3bb5459ca36e2f1f1e6a.py | 17a06921e97b68ca50fdde1a771fb8afdae83487 | [
"MIT"
] | permissive | iwanb/ixnetwork_restpy | fa8b885ea7a4179048ef2636c37ef7d3f6692e31 | c2cb68fee9f2cc2f86660760e9e07bd06c0013c2 | refs/heads/master | 2021-01-02T17:27:37.096268 | 2020-02-11T09:28:15 | 2020-02-11T09:28:15 | 239,721,780 | 0 | 0 | NOASSERTION | 2020-02-11T09:20:22 | 2020-02-11T09:20:21 | null | UTF-8 | Python | false | false | 12,376 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class VlanRange(Base):
"""
The VlanRange class encapsulates a required vlanRange resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'vlanRange'
def __init__(self, parent):
super(VlanRange, self).__init__(parent)
@property
def VlanIdInfo(self):
"""An instance of the VlanIdInfo class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vlanidinfo_dcbd732ee1f6f51f5677e7e4d7c7e5d1.VlanIdInfo)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vlanidinfo_dcbd732ee1f6f51f5677e7e4d7c7e5d1 import VlanIdInfo
return VlanIdInfo(self)
@property
def Enabled(self):
"""Disabled ranges won't be configured nor validated.
Returns:
bool
"""
return self._get_attribute('enabled')
@Enabled.setter
def Enabled(self, value):
self._set_attribute('enabled', value)
@property
def FirstId(self):
"""DEPRECATED The first ID to be used for the first VLAN tag.
Returns:
number
"""
return self._get_attribute('firstId')
@FirstId.setter
def FirstId(self, value):
self._set_attribute('firstId', value)
@property
def IdIncrMode(self):
"""Method used to increment VLAN IDs. May take the following values: 0 (First VLAN first), 1 (Last VLAN first), 2 (All).
Returns:
number
"""
return self._get_attribute('idIncrMode')
@IdIncrMode.setter
def IdIncrMode(self, value):
self._set_attribute('idIncrMode', value)
@property
def Increment(self):
"""DEPRECATED Amount of increment per increment step for first VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
Returns:
number
"""
return self._get_attribute('increment')
@Increment.setter
def Increment(self, value):
self._set_attribute('increment', value)
@property
def IncrementStep(self):
"""DEPRECATED Frequency of first VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
Returns:
number
"""
return self._get_attribute('incrementStep')
@IncrementStep.setter
def IncrementStep(self, value):
self._set_attribute('incrementStep', value)
@property
def InnerEnable(self):
"""DEPRECATED Enable the inner VLAN.
Returns:
bool
"""
return self._get_attribute('innerEnable')
@InnerEnable.setter
def InnerEnable(self, value):
self._set_attribute('innerEnable', value)
@property
def InnerFirstId(self):
"""DEPRECATED The first ID to be used for the inner VLAN tag.
Returns:
number
"""
return self._get_attribute('innerFirstId')
@InnerFirstId.setter
def InnerFirstId(self, value):
self._set_attribute('innerFirstId', value)
@property
def InnerIncrement(self):
"""DEPRECATED Amount of increment per increment step for Inner VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
Returns:
number
"""
return self._get_attribute('innerIncrement')
@InnerIncrement.setter
def InnerIncrement(self, value):
self._set_attribute('innerIncrement', value)
@property
def InnerIncrementStep(self):
"""DEPRECATED Frequency of inner VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
Returns:
number
"""
return self._get_attribute('innerIncrementStep')
@InnerIncrementStep.setter
def InnerIncrementStep(self, value):
self._set_attribute('innerIncrementStep', value)
@property
def InnerPriority(self):
"""DEPRECATED The 802.1Q priority to be used for the inner VLAN tag.
Returns:
number
"""
return self._get_attribute('innerPriority')
@InnerPriority.setter
def InnerPriority(self, value):
self._set_attribute('innerPriority', value)
@property
def InnerTpid(self):
"""DEPRECATED The TPID value in the inner VLAN Tag.
Returns:
str
"""
return self._get_attribute('innerTpid')
@InnerTpid.setter
def InnerTpid(self, value):
self._set_attribute('innerTpid', value)
@property
def InnerUniqueCount(self):
"""DEPRECATED Number of unique inner VLAN IDs to use.
Returns:
number
"""
return self._get_attribute('innerUniqueCount')
@InnerUniqueCount.setter
def InnerUniqueCount(self, value):
self._set_attribute('innerUniqueCount', value)
@property
def Name(self):
"""Name of range
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def ObjectId(self):
"""Unique identifier for this object
Returns:
str
"""
return self._get_attribute('objectId')
@property
def Priority(self):
"""DEPRECATED The 802.1Q priority to be used for the outer VLAN tag.
Returns:
number
"""
return self._get_attribute('priority')
@Priority.setter
def Priority(self, value):
self._set_attribute('priority', value)
@property
def Tpid(self):
"""DEPRECATED The TPID value in the outer VLAN Tag.
Returns:
str
"""
return self._get_attribute('tpid')
@Tpid.setter
def Tpid(self, value):
self._set_attribute('tpid', value)
@property
def UniqueCount(self):
"""DEPRECATED Number of unique first VLAN IDs to use.
Returns:
number
"""
return self._get_attribute('uniqueCount')
@UniqueCount.setter
def UniqueCount(self, value):
self._set_attribute('uniqueCount', value)
def update(self, Enabled=None, FirstId=None, IdIncrMode=None, Increment=None, IncrementStep=None, InnerEnable=None, InnerFirstId=None, InnerIncrement=None, InnerIncrementStep=None, InnerPriority=None, InnerTpid=None, InnerUniqueCount=None, Name=None, Priority=None, Tpid=None, UniqueCount=None):
"""Updates a child instance of vlanRange on the server.
Args:
Enabled (bool): Disabled ranges won't be configured nor validated.
FirstId (number): The first ID to be used for the first VLAN tag.
IdIncrMode (number): Method used to increment VLAN IDs. May take the following values: 0 (First VLAN first), 1 (Last VLAN first), 2 (All).
Increment (number): Amount of increment per increment step for first VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
IncrementStep (number): Frequency of first VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
InnerEnable (bool): Enable the inner VLAN.
InnerFirstId (number): The first ID to be used for the inner VLAN tag.
InnerIncrement (number): Amount of increment per increment step for Inner VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
InnerIncrementStep (number): Frequency of inner VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
InnerPriority (number): The 802.1Q priority to be used for the inner VLAN tag.
InnerTpid (str): The TPID value in the inner VLAN Tag.
InnerUniqueCount (number): Number of unique inner VLAN IDs to use.
Name (str): Name of range
Priority (number): The 802.1Q priority to be used for the outer VLAN tag.
Tpid (str): The TPID value in the outer VLAN Tag.
UniqueCount (number): Number of unique first VLAN IDs to use.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def CustomProtocolStack(self, *args, **kwargs):
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2:list, Arg3:enum)
Args:
args[0] is Arg2 (list(str)): List of plugin types to be added in the new custom stack
args[1] is Arg3 (str(kAppend|kMerge|kOverwrite)): Append, merge or overwrite existing protocol stack
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
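        # positional args are sent to the server as Arg2, Arg3, ...; keyword args pass through by name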
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2:string)string
Args:
args[0] is Arg2 (str): Protocol class name to disable
Returns:
str: Status of the exec
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2:string)string
Args:
args[0] is Arg2 (str): Protocol class name to enable
Returns:
str: Status of the exec
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
| [
"[email protected]"
] | |
31b4141a873aabaddebfc4f7cabbd48c62df8a2a | af0371bae18b35cddfedff5b0691bee6e0a49fa0 | /blog/views.py | 200c57024f905596eb914dccc64dfbb7cc69c71f | [] | no_license | loveAlakazam/myFirstDjango | 4f0eaab72033cc1ec37af5ab38b7bb51ddb54437 | 254e673e16ae63813283a8f2ca11221013c71e91 | refs/heads/master | 2022-04-13T09:18:09.627762 | 2020-04-12T17:52:15 | 2020-04-12T17:52:15 | 255,072,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | from django.shortcuts import render
from .models import Post
from django.utils import timezone
from django.shortcuts import redirect, get_object_or_404  # additional shortcut helpers (render is imported above)
from .forms import PostForm
def post_list(request):
posts= Post.objects.order_by('published_date')
return render(request, 'blog/post_list.html', {'posts': posts})
def post_detail(request, pk):
post= get_object_or_404(Post, pk=pk)
return render(request, 'blog/post_detail.html', {'post':post})
def post_new(request):
    if request.method == 'POST':
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)  # build the Post without saving it yet
            post.author = request.user  # register the author
            post.published_date = timezone.now()  # set the publication date
            post.save()  # save once author and published_date are filled in
            return redirect('post_detail', pk=post.pk)  # go to the post_detail page
    else:
        form = PostForm()
    return render(request, 'blog/post_edit.html', {'form': form})
def post_edit(request, pk):
    post = get_object_or_404(Post, pk=pk)
    if request.method == 'POST':
        form = PostForm(request.POST, instance=post)  # bind the submitted data to the existing post
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('post_detail', pk=post.pk)
    else:
        form = PostForm(instance=post)
    return render(request, 'blog/post_edit.html', {'form': form})
| [
"[email protected]"
] | |
5a9597bd786db3a88a6fc2f92ea4a216148767c8 | 909204353bf76ac66f12c99759361c3bb6a4c6c1 | /TriggerProjectiles.py | 0c593ce0bbfd91e2cb949c2cd4ed353dd3aa6a0f | [] | no_license | LoomAcademics/pygame-assignment | 32e9af11a25f9e8503d66124efef56777add6f6f | d89e0cda478b335d7e24ec3fd45acab92fe54514 | refs/heads/master | 2021-01-12T14:36:15.609002 | 2016-10-26T21:17:15 | 2016-10-26T21:17:15 | 72,035,959 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,000 | py | import pygame, sys
from pygame.locals import *
class Projectile:
def __init__(self, x, y):
self.color = (253, 204, 17)
self.x = x
self.y = y
self.speed = -5
self.isDead = False
def update(self):
self.y += self.speed
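        # speed is negative (the shot moves up); once y <= speed the whole 5 px trail is off-screen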
if self.y <= self.speed:
self.isDead = True
def display(self):
pygame.draw.line(screen, self.color, (self.x, self.y), (self.x, self.y - self.speed))
class Spaceship:
def __init__(self):
self.color = (255, 255, 255)
self.x = 250
self.y = 400
self.w = 25
self.h = 40
def update(self):
pos = pygame.mouse.get_pos()
self.x = pos[0]
def display(self):
offset = self.w / 2
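        # offset centers the rect horizontally on the mouse-driven x position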
pygame.draw.rect(screen, self.color, (self.x - offset, self.y, self.w, self.h))
if __name__ == '__main__':
pygame.init()
width = 500
height = 500
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('Old School Video Game')
clock = pygame.time.Clock()
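    # the clock caps the loop at 60 FPS (clock.tick_busy_loop below), keeping per-frame movement consistent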
player = Spaceship()
projectileList = []
hasFired = False
while True:
# Re-initialize
hasFired = False
# Handle Events
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_a:
hasFired = True
# Update logic
player.update()
if hasFired:
projectileList.append(Projectile(player.x, 400))
        for p in projectileList[:]:  # iterate over a copy: removing from a list while iterating it skips elements
            p.update()
            if p.isDead:
                projectileList.remove(p)
# Display
screen.fill((0, 0, 0))
player.display()
for p in projectileList:
p.display()
pygame.display.update()
print(len(projectileList))
clock.tick_busy_loop(60)
| [
"[email protected]"
] | |
a5f368ea34dd6b85d222ab23cab1da116907cf7f | 4a3f8ce7daa54a51a93bae9bd6b4a48d1c9cb225 | /LinkedList.py | 4ac2ba792f00af88cc68d5fb0d72de2562835c83 | [] | no_license | KmrAnil/Python | bf119ee19f2c16c5619a64520aef2ac7f3668afa | 3a193f3eedf3288f45a12daed5703bfff1ae2d92 | refs/heads/master | 2020-06-26T13:46:06.192515 | 2019-08-07T15:14:39 | 2019-08-07T15:14:39 | 199,648,245 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,915 | py | class Node:
def __init__(self,data):
self.data=data
self.next=None
class LinkedList:
def __init__(self):
self.head=None
self.tail=None
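        # tail pointer allows O(1) appends in insertNodeAtTail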
#insert Node at the end of linked List
def insertNodeAtTail(self,new_node_data):
new_node = Node(new_node_data)
if self.head ==None:
self.head = new_node
else:
self.tail.next= new_node
self.tail =new_node
    #insert node at the head of linked list
    def insertNodeAtHead(self,new_node_data):
        new_node = Node(new_node_data)
        if self.head is None:  # empty list: new node is both head and tail
            self.head = new_node
            self.tail = new_node
        else:
            new_node.next = self.head
            self.head = new_node
        return self.head
#insert node at specific position
def insertAtSpecificPosition(self,new_node_data,position):
new_node=Node(new_node_data)
temp=self.head
if position==0:
new_node.next=temp
self.head=new_node
return
i=1
while i!=position:
temp=temp.next
i+=1
new_node.next=temp.next
temp.next=new_node
    #print the elements of the linked list
def printll(self):
temp=self.head
while temp != None:
print(temp.data)
temp=temp.next
#Find the length of Linked List
def countll(self):
i=0
temp=self.head
while temp !=None:
temp=temp.next
i+=1
return i
    #reverse the linked list in place
    def reversell(self):
        current = self.head
        prev = None
        self.tail = self.head  # the old head becomes the new tail
        while current is not None:
            next_node = current.next  # remember the rest of the list
            current.next = prev  # repoint this node backwards
            prev = current
            current = next_node
        self.head = prev
        return prev
# delete node at particular position
def deletell(self,position):
temp=self.head
if position ==0:
self.head=temp.next
return
i =1
while i!=position:
temp=temp.next
i+=1
temp.next = temp.next.next
    #remove consecutive duplicate elements (removes all duplicates when the list is sorted)
    def removeDuplicates(self):
        if self.head is None:
            return self.head
        copy = self.head
        temp = self.head.next
        while copy.next is not None:
            if copy.data == temp.data:
                copy.next = temp.next  # unlink the duplicate node
                temp = temp.next
            else:
                copy = copy.next
                temp = temp.next
        return self.head
    #linear search for an element in the linked list
def searchll(self,value):
count =0
temp =self.head
while temp!=None:
if temp.data == value:
count=1
break
else:
temp=temp.next
if count ==1:
print("Element Exist")
else:
print("Element Not Exist")
    #sort the list in place by comparing every pair of nodes and swapping their data (O(n^2))
def sort (self):
if self.head !=None:
i = self.head
j =self.head.next
while i!=None:
j=i.next
while j!=None:
if i.data >j.data:
temp =i.data
i.data=j.data
j.data=temp
j=j.next
i=i.next
    #check whether the linked list reads the same forwards and backwards (assumes single-digit node values)
    def isPalindrome(self):
        temp = self.head
        t1 = 0
        t2 = 0
        n = 1
        while temp is not None:
            t1 = t1 + temp.data * n  # number built with the digits reversed
            t2 = t2 * 10 + temp.data  # number built in list order
            temp = temp.next
            n = n * 10
        if t1 == t2:
            return 1
        else:
            return 0
if __name__ == "__main__":
llist = LinkedList()
    n = int(input("Enter the number of elements in the linked list: "))
for i in range(0,n):
elt = int(input())
llist.insertNodeAtTail(elt)
    llist.insertNodeAtHead(int(input("Enter the element to insert at the head: ")))
    print("Number of nodes:", llist.countll())
llist.reversell()
    llist.deletell(int(input("Enter the position of the element you want to delete: ")))
    value, position = map(int, input("Enter the value and position where the element should be inserted: ").split())
llist.insertAtSpecificPosition(value,position)
llist.removeDuplicates()
    llist.searchll(int(input("Enter the value to search for: ")))
llist.sort()
llist.printll()
    print(llist.isPalindrome())
| [
"[email protected]"
] | |
d15ad3682c2dd238d60b3a61473c8c144f95cfab | d143cf96565ae5696b4836ffb0953f199bf3d6d3 | /debug_rjmc | f171b8659b62ab97ba9c7dc482ff212783833db7 | [] | no_license | ocmadin/RJMC_LJ_Ethane | 6e90b6614df3ceedbdf2f1862af03bd0d5c05dc7 | c95c66a06ad44c023258f5d5f7635c96a3c681fc | refs/heads/master | 2021-06-11T01:49:49.327485 | 2019-11-19T20:06:50 | 2019-11-19T20:06:50 | 115,141,168 | 0 | 0 | null | 2019-11-19T20:06:51 | 2017-12-22T18:36:10 | Python | UTF-8 | Python | false | false | 1,881 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 9 17:16:05 2019
@author: owenmadin
"""
from __future__ import division
import numpy as np
import argparse
import scipy as sp
import matplotlib.pyplot as plt
import pandas as pd
import yaml
from LennardJones_correlations import LennardJones
from LennardJones_2Center_correlations import LennardJones_2C
from scipy.stats import distributions
from scipy.stats import linregress
from scipy.optimize import minimize
import random as rm
from pymc3.stats import hpd
from RJMC_auxiliary_functions import *
from datetime import date
import copy
from pymbar import BAR,timeseries
import random
import sys
from RJMC_2CLJQ_OOP import RJMC_Simulation,RJMC_Prior
def main():
compound='C2H6'
properties='rhol+Psat'
T_range=[0.55,0.95]
n_points=10
swap_freq=0.1
    steps = 1 * 10**5
biasing_factor=[0,0,0]
optimum_matching=['True','True']
prior_values ={
'epsilon': ['exponential',[400]],
'sigma': ['exponential',[5]],
'L': ['exponential',[3]],
'Q': ['exponential',[1]]}
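    # each entry names a distribution and its parameter list: exponential priors on all four 2CLJQ parameters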
prior = RJMC_Prior(prior_values)
prior.epsilon_prior()
prior.sigma_prior()
prior.L_prior()
prior.Q_prior()
rjmc_simulator = RJMC_Simulation(compound,T_range,properties,n_points,steps,swap_freq,biasing_factor,optimum_matching)
rjmc_simulator.prepare_data()
print(rjmc_simulator.get_attributes())
compound_2CLJ = LennardJones_2C(rjmc_simulator.M_w)
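    # two-center Lennard-Jones correlation model used to evaluate properties for this compound (M_w from the simulation setup)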
rjmc_simulator.set_initial_state(prior,compound_2CLJ)
rjmc_simulator.gen_Tmatrix(prior,compound_2CLJ)
rjmc_simulator.RJMC_Outerloop(prior,compound_2CLJ)
trace,logp_trace,percent_dev_trace=rjmc_simulator.Report()
return trace, logp_trace,percent_dev_trace
if __name__ == '__main__':
trace, logp_trace,percent_dev_trace=main() | [
"[email protected]"
] | ||
0c0bb901ffc02ac468d92195b334cdef296ff75b | d58cf76ddd9982297001408f2a4c81002f49a832 | /main.py | 9388a904227d9f62132c934882347418da33f3bb | [] | no_license | fpadula/gesture_recognition | 9b8856f38dcbf75b57b8db539473df51ef63df83 | 934346c45c6836ca2a77f2455870df5a26b673a1 | refs/heads/master | 2023-08-12T11:56:25.474104 | 2021-09-24T18:27:11 | 2021-09-24T18:27:11 | 410,062,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | from GestureReader import GestureReader
from SimpleStateMachine import SimpleStateMachine
import cv2 as cv
def main():
sm = SimpleStateMachine()
gr = GestureReader()
# Opencv configs:
cap_device = 0
cap_width = 960
cap_height = 540
cap = cv.VideoCapture(cap_device)
cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
sm.start()
while True:
key = cv.waitKey(10)
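        # waitKey also services the HighGUI event loop; the 10 ms wait keeps the window responsive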
if key == 27: # ESC
break
# Camera capture
ret, image = cap.read()
if not ret:
break
detected_gesture, debug_image = gr.detect_gesture(image)
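        # map each recognized hand pose to a state-machine action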
        if detected_gesture is not None:
if detected_gesture == "One":
sm.perform_action("perform action 1")
elif detected_gesture == "Two":
sm.perform_action("perform action 2")
elif detected_gesture == "Open":
sm.perform_action("stop")
cv.imshow('Hand Gesture Recognition', debug_image)
sm.stop()
cap.release()
cv.destroyAllWindows()
if __name__ == "__main__":
main() | [
"[email protected]"
] |