The records below follow this column schema (one row per source file):

| column | type | length / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-57 |
| license_type | string | 2 classes |
| repo_name | string | length 6-116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k-668M, nullable |
| star_events_count | int64 | 0-102k |
| fork_events_count | int64 | 0-38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4-6.02M |
| extension | string | 78 classes |
| content | string | length 2-6.02M |
| authors | list | length 1 |
| author | string | length 0-175 |

Each record below lists these fields in order, separated by `|` markers, with the source file itself under `content`.
6f72d57c43b8fe76cd74819a7a83fffb1ad3bf93
|
9f6e090c2cfd56979997cfde82b288e83e916677
|
/djangonautic/urls.py
|
babe920e58533a79e39dc3eddaed7be8a04e7947
|
[] |
no_license
|
F13RC3/djangonautic
|
73a26bde787041dfc8c1946948173a1c6a1112d7
|
12a6aa21c3500ad0dfab84cadb1a67b6cb68c523
|
refs/heads/master
| 2020-07-05T05:16:15.707209 | 2019-08-15T12:10:20 | 2019-08-15T12:10:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 701 |
py
|
from django.contrib import admin
from django.urls import re_path, include
from . import views
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
from django.conf import settings
from articles import views as article_views
urlpatterns = [
    re_path(r'^admin/', admin.site.urls),
    re_path(r'^accounts/', include('accounts.urls')),
    re_path(r'^articles/', include('articles.urls')),
    re_path(r'^about/$', views.about),
    re_path(r'^$', article_views.article_list, name='home'),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
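# Illustrative sketch (not part of this repo): include('accounts.urls') above
# assumes an accounts/urls.py that defines its own urlpatterns, e.g.:
#
#   from django.urls import re_path
#   from . import views  # hypothetical accounts views
#
#   urlpatterns = [
#       re_path(r'^signup/$', views.signup, name='signup'),
#   ]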
|
[
"[email protected]"
] | |
ad3f2b56cdbdd7f80247eb8a1a6fb70b31c024d4
|
f15d24ce70ba36ec709b6a9c024725bde7082955
|
/T_others_to_c.py
|
cb1a4ab318669a840ad5b68e0a23279421b5a0bd
|
[] |
no_license
|
fdc227/AIRCRAFT_MODEL_RELEASE
|
4a1e6e3ee1b791561618f63f5b7de8df9015308c
|
96747e44a76855c7e0a62da986c5ea73f8aab07c
|
refs/heads/master
| 2020-07-09T20:59:16.088364 | 2019-10-07T13:22:40 | 2019-10-07T13:22:40 | 204,082,790 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,720 |
py
|
import pickle
from sympy import *
from sympy.printing.ccode import C99CodePrinter
from sympy.printing.codeprinter import Assignment
from iseven import iseven
import sys
print(sys.argv[1])
print(sys.argv[2])
num_of_elements, num_of_processes = int(sys.argv[1]), int(sys.argv[2])
# num_of_elements = total number of finite element sections on the two beams
# num_of_processes = number of CPU processes to use for the function; multiprocessing is assumed
if not iseven(num_of_elements):
    raise Exception("Number of finite beam elements must be even")
else:
np = num_of_elements // 2
nq = num_of_elements // 2 + 1
t = symbols('t')
x, w, L, theta_0 = symbols('x, w, L, theta_0')
M, m, x, y, z, g, h, E, I, G, J, x_f, c, s, K = symbols('M, m, x, y, z, g, h, E, I, G, J, x_f, c, s, K')
rho, V, a_w, gamma, M_thetadot, e = symbols('rho, V, a_w, gamma, M_thetadot, e')
beta, P, Q, R = symbols('beta, P, Q, R')
W_x, W_y, W_z = symbols('W_x, W_y, W_z')
P_s, gamma_alpha = symbols('P_s, gamma_alpha')
A = symbols('A')
theta = symbols('theta')
phi = symbols('phi')
psi = symbols('psi')
X = symbols('X')
Y = symbols('Y')
Z = symbols('Z')
short_var_list = [theta, phi, psi, X, Y, Z]
theta_dt = symbols('theta_dt')
phi_dt = symbols('phi_dt')
psi_dt = symbols('psi_dt')
X_dt = symbols('X_dt')
Y_dt = symbols('Y_dt')
Z_dt = symbols('Z_dt')
short_var_list_dt = [theta_dt, phi_dt, psi_dt, X_dt, Y_dt, Z_dt]
theta_dt_dt = symbols('theta_dt_dt')
phi_dt_dt = symbols('phi_dt_dt')
psi_dt_dt = symbols('psi_dt_dt')
X_dt_dt = symbols('X_dt_dt')
Y_dt_dt = symbols('Y_dt_dt')
Z_dt_dt = symbols('Z_dt_dt')
short_var_list_dt_dt = [theta_dt_dt, phi_dt_dt, psi_dt_dt, X_dt_dt, Y_dt_dt, Z_dt_dt]
var_q_bending = []
for i in range(np, 0, -1):
globals()[f'p{i}_b'] = symbols(f'p{i}_b')
var_q_bending.append(globals()[f'p{i}_b'])
for i in range(1, nq):
globals()[f'q{i}_b'] = symbols(f'q{i}_b')
var_q_bending.append(globals()[f'q{i}_b'])
var_q_bending_dot = []
for i in range(np, 0, -1):
globals()[f'p{i}_b_dot'] = symbols(f'p{i}_b_dot')
var_q_bending_dot.append(globals()[f'p{i}_b_dot'])
for i in range(1, nq):
globals()[f'q{i}_b_dot'] = symbols(f'q{i}_b_dot')
var_q_bending_dot.append(globals()[f'q{i}_b_dot'])
var_q_torsion = []
for i in range(np, 0, -1):
globals()[f'p{i}_t'] = symbols(f'p{i}_t')
var_q_torsion.append(globals()[f'p{i}_t'])
for i in range(1, nq):
globals()[f'q{i}_t'] = symbols(f'q{i}_t')
var_q_torsion.append(globals()[f'q{i}_t'])
var_q_inplane = []
for i in range(np, 0, -1):
globals()[f'p{i}_i'] = symbols(f'p{i}_i')
var_q_inplane.append(globals()[f'p{i}_i'])
for i in range(1, nq):
globals()[f'q{i}_i'] = symbols(f'q{i}_i')
var_q_inplane.append(globals()[f'q{i}_i'])
var_q_inplane_dot = []
for i in range(np, 0, -1):
globals()[f'p{i}_i_dot'] = symbols(f'p{i}_i_dot')
var_q_inplane_dot.append(globals()[f'p{i}_i_dot'])
for i in range(1, nq):
globals()[f'q{i}_i_dot'] = symbols(f'q{i}_i_dot')
var_q_inplane_dot.append(globals()[f'q{i}_i_dot'])
var_q_list = [*var_q_bending, *var_q_bending_dot, *var_q_torsion, *var_q_inplane, *var_q_inplane_dot]
var_q_bending_dt = []
for i in range(np, 0, -1):
globals()[f'p{i}_b_dt'] = symbols(f'p{i}_b_dt')
var_q_bending_dt.append(globals()[f'p{i}_b_dt'])
for i in range(1, nq):
globals()[f'q{i}_b_dt'] = symbols(f'q{i}_b_dt')
var_q_bending_dt.append(globals()[f'q{i}_b_dt'])
var_q_bending_dot_dt = []
for i in range(np, 0, -1):
globals()[f'p{i}_b_dot_dt'] = symbols(f'p{i}_b_dot_dt')
var_q_bending_dot_dt.append(globals()[f'p{i}_b_dot_dt'])
for i in range(1, nq):
globals()[f'q{i}_b_dot_dt'] = symbols(f'q{i}_b_dot_dt')
var_q_bending_dot_dt.append(globals()[f'q{i}_b_dot_dt'])
var_q_torsion_dt = []
for i in range(np, 0, -1):
globals()[f'p{i}_t_dt'] = symbols(f'p{i}_t_dt')
var_q_torsion_dt.append(globals()[f'p{i}_t_dt'])
for i in range(1, nq):
globals()[f'q{i}_t_dt'] = symbols(f'q{i}_t_dt')
var_q_torsion_dt.append(globals()[f'q{i}_t_dt'])
var_q_inplane_dt = []
for i in range(np, 0, -1):
globals()[f'p{i}_i_dt'] = symbols(f'p{i}_i_dt')
var_q_inplane_dt.append(globals()[f'p{i}_i_dt'])
for i in range(1, nq):
globals()[f'q{i}_i_dt'] = symbols(f'q{i}_i_dt')
var_q_inplane_dt.append(globals()[f'q{i}_i_dt'])
var_q_inplane_dot_dt = []
for i in range(np, 0, -1):
globals()[f'p{i}_i_dot_dt'] = symbols(f'p{i}_i_dot_dt')
var_q_inplane_dot_dt.append(globals()[f'p{i}_i_dot_dt'])
for i in range(1, nq):
globals()[f'q{i}_i_dot_dt'] = symbols(f'q{i}_i_dot_dt')
var_q_inplane_dot_dt.append(globals()[f'q{i}_i_dot_dt'])
var_q_list_dt = [*var_q_bending_dt, *var_q_bending_dot_dt, *var_q_torsion_dt, *var_q_inplane_dt, *var_q_inplane_dot_dt]
var_q_bending_dt_dt = []
for i in range(np, 0, -1):
globals()[f'p{i}_b_dt_dt'] = symbols(f'p{i}_b_dt_dt')
var_q_bending_dt_dt.append(globals()[f'p{i}_b_dt_dt'])
for i in range(1, nq):
globals()[f'q{i}_b_dt_dt'] = symbols(f'q{i}_b_dt_dt')
var_q_bending_dt_dt.append(globals()[f'q{i}_b_dt_dt'])
var_q_bending_dot_dt_dt = []
for i in range(np, 0, -1):
globals()[f'p{i}_b_dot_dt_dt'] = symbols(f'p{i}_b_dot_dt_dt')
var_q_bending_dot_dt_dt.append(globals()[f'p{i}_b_dot_dt_dt'])
for i in range(1, nq):
globals()[f'q{i}_b_dot_dt_dt'] = symbols(f'q{i}_b_dot_dt_dt')
var_q_bending_dot_dt_dt.append(globals()[f'q{i}_b_dot_dt_dt'])
var_q_torsion_dt_dt = []
for i in range(np, 0, -1):
globals()[f'p{i}_t_dt_dt'] = symbols(f'p{i}_t_dt_dt')
var_q_torsion_dt_dt.append(globals()[f'p{i}_t_dt_dt'])
for i in range(1, nq):
globals()[f'q{i}_t_dt_dt'] = symbols(f'q{i}_t_dt_dt')
var_q_torsion_dt_dt.append(globals()[f'q{i}_t_dt_dt'])
var_q_inplane_dt_dt = []
for i in range(np, 0, -1):
globals()[f'p{i}_i_dt_dt'] = symbols(f'p{i}_i_dt_dt')
var_q_inplane_dt_dt.append(globals()[f'p{i}_i_dt_dt'])
for i in range(1, nq):
globals()[f'q{i}_i_dt_dt'] = symbols(f'q{i}_i_dt_dt')
var_q_inplane_dt_dt.append(globals()[f'q{i}_i_dt_dt'])
var_q_inplane_dot_dt_dt = []
for i in range(np, 0, -1):
globals()[f'p{i}_i_dot_dt_dt'] = symbols(f'p{i}_i_dot_dt_dt')
var_q_inplane_dot_dt_dt.append(globals()[f'p{i}_i_dot_dt_dt'])
for i in range(1, nq):
globals()[f'q{i}_i_dot_dt_dt'] = symbols(f'q{i}_i_dot_dt_dt')
var_q_inplane_dot_dt_dt.append(globals()[f'q{i}_i_dot_dt_dt'])
var_q_list_dt_dt = [*var_q_bending_dt_dt, *var_q_bending_dot_dt_dt, *var_q_torsion_dt_dt, *var_q_inplane_dt_dt, *var_q_inplane_dot_dt_dt]
q_list = [*short_var_list, *var_q_list]
q_list_dt = [*short_var_list_dt, *var_q_list_dt]
q_list_dt_dt = [*short_var_list_dt_dt, *var_q_list_dt_dt]
y_sym = [*q_list, *q_list_dt]
str1_list = []
for i in range(len(y_sym)):
str1_list.append('double ' + str(y_sym[i]) + '=' + f'state_var[{i}];')
str1 = '\n'.join(str1_list)
print('str1 generated')
with open('T_others.pkl', 'rb') as T_raw:
    T_others = Matrix(pickle.load(T_raw))
class CMatrixPrinter(C99CodePrinter):
def _print_ImmutableDenseMatrix(self, expr):
sub_exprs, simplified = cse(expr)
lines = []
for var, sub_expr in sub_exprs:
lines.append('double ' + self._print(Assignment(var, sub_expr)))
M = MatrixSymbol('T_others', *expr.shape)
return '\n'.join(lines) + '\n' + self._print(Assignment(M, simplified[0]))
p = CMatrixPrinter()
str2 = p.doprint(T_others)
print('str2 generated')
str3 = str1 + '\n' + str2
str0 = '#include <iostream>'+'\n'+'#include <cmath>'+'\n'+'#include "parameters.h"'+'\n'+'\n'+'void T_others_f(double* state_var, double* T_others)'+'\n'+'{\n'
str_end = '\n}'
str_final = str0 + str3 + str_end
with open('T_others_c.cpp', 'w') as T_c:
    T_c.write(str_final)
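# Usage sketch: assuming T_others.pkl (a pickled sympy matrix) sits next to
# this script, running
#   python T_others_to_c.py 10 4
# writes T_others_c.cpp containing
#   void T_others_f(double* state_var, double* T_others) { ... }
# where common subexpressions are emitted as intermediate double assignments
# before the matrix entries are filled in.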
|
[
"[email protected]"
] | |
5837d24747eb111593c4fdc4fdb16c2048efb91e
|
d3e6d6555b0314936902727af36de2f1b7432bf8
|
/linked-list-cycle/linked-list-cycle.py
|
af4d1032682c25c061b7019097dc1288fceab653
|
[] |
no_license
|
fly2rain/LeetCode
|
624b1e06e1aa3174dfb5c81834b58cc8fd7ad073
|
4ddb5a051c6e2051f016a675fd2f5d566c800c2a
|
refs/heads/master
| 2021-01-18T03:12:22.402044 | 2015-12-28T04:31:19 | 2015-12-28T04:31:19 | 85,842,050 | 0 | 1 | null | 2017-03-22T15:05:20 | 2017-03-22T15:05:19 | null |
UTF-8
|
Python
| false | false | 771 |
py
|
from utils import ListNode
class Solution(object):
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
if not head:
return False
        # Reverse the links while walking the list: if the walk ever gets back
        # to the original head, the list contains a cycle.
        prev, current = head, head.next
        head.next = None
        while current:
            if current == head:
                return True
            nxt = current.next
            current.next = prev
            prev, current = current, nxt
        return False
if __name__ == '__main__':
    head = ListNode.build_linked_list([1, 2, 3, 4, 5])
    head.next.next.next.next = head.next.next  # create a cycle: 5 -> 3
    print(Solution().hasCycle(head))
    head2 = ListNode.build_linked_list([1, 2, 3, 4, 5])
    print(Solution().hasCycle(head2))
    print(Solution().hasCycle(None))
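# Note: hasCycle() above detects the cycle by reversing links as it walks,
# which destroys the original list. A non-destructive alternative is Floyd's
# tortoise-and-hare, sketched here under the same ListNode assumptions:
#
# def has_cycle_floyd(head):
#     slow = fast = head
#     while fast and fast.next:
#         slow = slow.next
#         fast = fast.next.next
#         if slow is fast:
#             return True
#     return False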
|
[
"[email protected]"
] | |
33302759c219b9a3b1fe2347ecb502a4dace1d4d
|
fc0150b1fd6ba0efd6746a34ffa8cba01640d10e
|
/Programming Basics with Python - април 2018/04. Complex-Conditions/02. Small Shop.py
|
f98d0d795257e24d58dfce9db983b1cd9ca6dbeb
|
[] |
no_license
|
vgrozev/SofUni_Python_hmwrks
|
7554d90f93b83d58e386c92dac355573c8cda848
|
b10a941a0195ea069e698b319f293f5b4a660547
|
refs/heads/master
| 2021-06-08T19:40:27.009205 | 2019-11-24T17:19:31 | 2019-11-24T17:19:31 | 95,629,443 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,132 |
py
|
product = input().lower()
town = input().lower()
quantity = float(input())
total = 0.0
if town == 'sofia':
if product == 'coffee':
total = quantity * 0.50
elif product == 'peanuts':
total = quantity * 1.60
elif product == 'beer':
total = quantity * 1.20
elif product == 'water':
total = quantity * 0.80
else: # product == 'sweets'
total = quantity * 1.45
elif town == 'plovdiv':
if product == 'coffee':
total = quantity * 0.40
elif product == 'peanuts':
total = quantity * 1.50
elif product == 'beer':
total = quantity * 1.15
elif product == 'water':
total = quantity * 0.70
else: # product == 'sweets'
total = quantity * 1.30
else: # town == 'Varna'
if product == 'coffee':
total = quantity * 0.45
elif product == 'peanuts':
total = quantity * 1.55
elif product == 'beer':
total = quantity * 1.10
elif product == 'water':
total = quantity * 0.70
else: # product == 'sweets'
total = quantity * 1.35
print("{0:.2f}".format(total))
|
[
"[email protected]"
] | |
b49d41c660d323470c0b91f8b0625757281eccd0
|
1be96ee96f3b33469ca073c4f32884cb7230106b
|
/python3_cron_scripts/libs3/ZoneManager.py
|
0531dbedb4a08f885bbf76e4b6fa355e672c65fc
|
[
"Apache-2.0"
] |
permissive
|
vishnurajkv/Marinus
|
3305478038fba8b0ea15dafa2219df9f4df21e9b
|
331ba1dc2e99ae99df6c9d93063a852eec41d578
|
refs/heads/master
| 2020-06-29T10:58:50.196807 | 2019-07-26T20:48:47 | 2019-07-26T20:48:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,583 |
py
|
#!/usr/bin/python3
# Copyright 2018 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
This class mostly exists because almost every script needs to do a get_distinct_zones.
Having it centralized means that the included and excluded statuses can be managed in one place.
"""
from pymongo import MongoClient
from datetime import datetime
from tld import get_fld
class ZoneManager(object):
# A status of confirmed typically means it was entered by a human
CONFIRMED = "confirmed"
# A status of unconfirmed means that it was added via automation
# It has not been reviewed by a human
UNCONFIRMED = "unconfirmed"
# A status of false positive means that a human identified that automation made a mistake
FALSE_POSITIVE = "false_positive"
# A status of expired means that the automation believes that the domain is no longer registered
EXPIRED = "expired"
# The MongoConnector
mongo_connector = None
# The zone collection
zone_collection = None
def __init__(self, mongo_connector):
"""
Initialize the MongoDB Connector
"""
self.mongo_connector = mongo_connector
self.zone_collection = mongo_connector.get_zone_connection()
def _check_valid_status(self, status):
        if status not in (ZoneManager.EXPIRED, ZoneManager.FALSE_POSITIVE,
                          ZoneManager.CONFIRMED, ZoneManager.UNCONFIRMED):
print("ERROR: Bad status value")
return False
return True
@staticmethod
    def get_distinct_zones(mongo_connector, includeAll=False):
"""
This is the most common usage of get zones where the caller wants just the list of
active zones.
This returns the list of zones as an array of strings rather than the complete JSON objects
"""
zones_collection = mongo_connector.get_zone_connection()
if includeAll:
zone_results = mongo_connector.perform_distinct(zones_collection, 'zone')
else:
zone_results = mongo_connector.perform_distinct(zones_collection, 'zone', {'status': {"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}})
zones = []
for zone in zone_results:
if zone.find(".") >= 0:
zones.append(zone)
return zones
@staticmethod
def get_reversed_zones(mongo_connector):
"""
Retrieve the list of active zones and then reverse them to match the Common Crawl format
"""
zones_collection = mongo_connector.get_zone_connection()
zone_results = mongo_connector.perform_distinct(zones_collection, 'zone', {'status': {"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}})
zones = []
for zone in zone_results:
if zone.find("."):
zone_parts = zone.split(".")
# The vertices.txt entries from common_crawl are in reverse order (e.g. org.example.www)
# To string match faster, the zones are stored in a reverse format prior to matching.
# This avoids having to reverse each entry in the file which is less efficient.
rev_zone = ""
for part in zone_parts:
rev_zone = part + "." + rev_zone
rev_zone = rev_zone[:-1]
zones.append(rev_zone)
return zones
@staticmethod
def get_zones_by_source(mongo_connector, source, includeAll=False):
"""
Returns a list of zones based on the provided reporting source
"""
zone_collection = mongo_connector.get_zone_connection()
if includeAll:
zones = mongo_connector.perform_distinct(zone_collection, 'zone', {
'reporting_sources.source': source})
else:
zones = mongo_connector.perform_distinct(zone_collection, 'zone', {
'reporting_sources.source': source,
'status': {'$nin': [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}})
return zones
@staticmethod
def get_zones(mongo_connector, includeAll=False):
"""
        This will return the full zone objects for all active zones.
This returns the complete json objects for the matching descriptions
"""
zones_collection = mongo_connector.get_zone_connection()
if includeAll:
zone_results = mongo_connector.perform_find(zones_collection, {})
else:
zone_results = mongo_connector.perform_find(zones_collection, {'status': {"$nin": [ZoneManager.FALSE_POSITIVE, ZoneManager.EXPIRED]}})
zones = []
for zone in zone_results:
if zone['zone'].find(".") >= 0:
zones.append(zone)
return zones
@staticmethod
def get_root_domain(value, zone=None):
"""
Get the root domain (FLD) for the provided value
"""
res = get_fld(value, fix_protocol=True, fail_silently=True)
if res is None:
return zone
return res
def get_zone(self, zone):
"""
Fetch the full individual zone record.
This is not a staticmethod since it would probably be called repeatedly.
"""
return self.mongo_connector.perform_find(self.zone_collection, {'zone': zone})
def get_zones_by_status(self, status):
"""
This returns the list of zones associated with the provided status.
This returns the list of zones as an array of strings rather than the complete JSON objects
"""
if not self._check_valid_status(status):
return
zone_results = self.mongo_connector.perform_distinct(self.zone_collection, 'zone', {'status': status})
zones = []
for zone in zone_results:
if zone.find(".") >= 0:
zones.append(zone)
return zones
def set_status(self, zone, status, caller):
"""
Set a zone to expired.
"""
if self.zone_collection.find({'zone': zone}).count() == 0:
print("ERROR: Invalid zone!")
return
        if not self._check_valid_status(status):
return
if caller is None or caller == "":
print("ERROR: Please provide a caller value!")
return
now = datetime.now()
note = caller + " set to " + status + " on " + str(now)
self.zone_collection.update({"zone": zone}, {"$set": {"status": status, "updated": now}, "$addToSet": {"notes": note}})
def add_note(self, zone, note):
"""
In the future, there should probably be restrictions on note length.
For now, it is not set until more information on usage is available.
"""
self.zone_collection.update({"zone": zone}, {"$addToSet": {"notes": note}})
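# Usage sketch, assuming a MongoConnector implementation that provides the
# get_zone_connection(), perform_distinct(), and perform_find() methods used
# above (import path is hypothetical):
#
#   from libs3.MongoConnector import MongoConnector
#   mongo_connector = MongoConnector()
#   active_zones = ZoneManager.get_distinct_zones(mongo_connector)
#   zone_manager = ZoneManager(mongo_connector)
#   zone_manager.set_status("example.org", ZoneManager.CONFIRMED, "admin_script")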
|
[
"[email protected]"
] | |
d3ef5ccaa99988559bd5fde97a0082c970a270a1
|
1548ce77537dcd50ab04b0eaee050b5d30553e23
|
/autotabular/algorithms/ctr/xdfm.py
|
003e7cba0a5433e271cb0403bed753da731ebcad
|
[
"Apache-2.0"
] |
permissive
|
Shamoo100/AutoTabular
|
4a20e349104246bf825ebceae33dca0a79928f2e
|
7d71bf01d2b7d84fcf5f65c9f45c5cea1255d8a2
|
refs/heads/main
| 2023-08-13T21:34:34.329888 | 2021-10-02T07:06:00 | 2021-10-02T07:06:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,338 |
py
|
import torch
from autotabular.algorithms.ctr.layer import CompressedInteractionNetwork, FeaturesEmbedding, FeaturesLinear, MultiLayerPerceptron
class ExtremeDeepFactorizationMachineModel(torch.nn.Module):
"""A pytorch implementation of xDeepFM.
Reference:
J Lian, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems, 2018.
"""
def __init__(self,
field_dims,
embed_dim,
mlp_dims,
dropout,
cross_layer_sizes,
split_half=True):
super().__init__()
self.embedding = FeaturesEmbedding(field_dims, embed_dim)
self.embed_output_dim = len(field_dims) * embed_dim
self.cin = CompressedInteractionNetwork(
len(field_dims), cross_layer_sizes, split_half)
self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims,
dropout)
self.linear = FeaturesLinear(field_dims)
def forward(self, x):
"""
:param x: Long tensor of size ``(batch_size, num_fields)``
"""
embed_x = self.embedding(x)
x = self.linear(x) + self.cin(embed_x) + self.mlp(
embed_x.view(-1, self.embed_output_dim))
return torch.sigmoid(x.squeeze(1))
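# Usage sketch (shapes follow the forward() docstring; the field cardinalities
# and hyperparameters below are made-up illustrations):
#
#   import torch
#   field_dims = [10, 20, 30]  # vocabulary size of each categorical field
#   model = ExtremeDeepFactorizationMachineModel(
#       field_dims, embed_dim=16, mlp_dims=(64, 32), dropout=0.2,
#       cross_layer_sizes=(16, 16))
#   x = torch.randint(0, 10, (8, len(field_dims)))  # (batch_size, num_fields)
#   probs = model(x)  # shape (8,), sigmoid outputs in [0, 1]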
|
[
"[email protected]"
] | |
b8ef08afc88239a1c064acc5c67624fcb605990c
|
3a918f7baaf088b44227781a5ba017f4dc9a1a13
|
/scripts/extract_results.py
|
6e51015592bbeab3b2067ba63e66c85f713ead57
|
[] |
no_license
|
jagol/MTL-SD
|
7bda0282c6d9eb5ce0ff2e9819f1933c7ceeba73
|
de1cef6c82d3366c5f2bf1430bcd546d8aa0ab92
|
refs/heads/main
| 2023-05-14T04:00:54.034887 | 2021-06-01T21:39:00 | 2021-06-01T21:39:00 | 343,364,812 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,493 |
py
|
import os
import csv
import json
import argparse
from typing import List
from preprocess import *
"""Script to extract relevant results from results-file of allennlp predict-command.
Creates new results file with name <input-name>_extracted.csv.
Removes old results file to avoid cluttering.
"""
def extract_results(fpath: str) -> List[List[float]]:
class_probs = []
with open(fpath) as fin:
probs_key = None
for i, line in enumerate(fin):
if i == 0:
for key in json.loads(line):
if key.endswith('probs'):
probs_key = key
class_probs.append(json.loads(line)[probs_key])
return class_probs
def write_results(results: List[List[float]], fpath_out) -> None:
with open(fpath_out, 'w') as fout:
writer = csv.writer(fout)
for prediction in results:
writer.writerow(prediction)
def main(cmd_args: argparse.Namespace) -> None:
results = extract_results(cmd_args.path)
fdir = '/'.join(cmd_args.path.split('/')[:-1])
fname = cmd_args.path.split('/')[-1]
fname_out = fname.split('.')[0] + '_extracted.csv'
write_results(results, os.path.join(fdir, fname_out))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--path', type=str, required=True,
help='Path to results file generated by allennlp predict-command.')
args = parser.parse_args()
main(args)
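# Example invocation (path is illustrative):
#   python extract_results.py -p predictions/run1.jsonl
# reads one JSON object per line, collects the values under the first key that
# ends in "probs", and writes them to predictions/run1_extracted.csv.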
|
[
"[email protected]"
] | |
63f124f199d2e152e2fc67618693c424f3febbb7
|
d458b72b4d0e5c51446bb8b9f8a6276015dfb594
|
/math/0x02-calculus/10-matisse.py
|
88cf330d9c797d23e8f981fda83e54f60879e7f5
|
[] |
no_license
|
mecomontes/Machine-Learning-projects
|
d6588cfaa7d020d3fae0fb74f6550c9e84500578
|
50e1828b58bb58eecfd3a142501b37fe701f4e49
|
refs/heads/main
| 2023-07-14T12:30:19.792332 | 2021-08-29T15:33:16 | 2021-08-29T15:33:16 | 376,129,791 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 808 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 8 9:34:16 2020
@author: Robinson Montes
"""
def poly_derivative(poly):
    """
    Finds the derivative of a polynomial.
    Arguments:
    - poly (list of ints/floats): coefficients of the polynomial, where the
      index is the power of x
    Return:
    List of coefficients representing the derivative of the polynomial
    """
    if poly is None or poly == [] or type(poly) is not list:
        return None
    derivative = []
    i = 0
    while i < len(poly):
        if type(poly[i]) not in (int, float):
            return None
        elif len(poly) == 1:
            derivative.append(0)
        else:
            if i == 0:
                i += 1
                continue
            derivative.append(poly[i] * i)
        i += 1
    return derivative
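# Worked example: [5, 3, 0, 1] represents 5 + 3x + 0x^2 + x^3, so
# poly_derivative([5, 3, 0, 1]) returns [3, 0, 3], i.e. 3 + 0x + 3x^2.
# poly_derivative([5]) returns [0], and poly_derivative("x") returns None.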
|
[
"[email protected]"
] | |
ba4a59497f41ffefe8c698f0a65012b2d35d88e6
|
b5aeb0f8b8efc77d77842237a80cce90e529ac5f
|
/config/settings.py
|
04b0faaaab467f76b64edc86c9631e42ab3f4de5
|
[] |
no_license
|
Pillin/POC-Django-Cooker
|
b078502d403a90cc57c4691265235ce855c8d75e
|
e6ad88564d3045af4a418234a927970f928e3c58
|
refs/heads/master
| 2022-12-12T15:02:41.410674 | 2019-09-30T03:41:28 | 2019-09-30T03:41:28 | 210,078,139 | 1 | 0 | null | 2022-12-08T05:22:06 | 2019-09-22T02:13:33 |
Python
|
UTF-8
|
Python
| false | false | 4,361 |
py
|
"""
Django settings for nora project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ENV = environ.Env()
ENV.read_env(os.path.join(BASE_DIR, '.env'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ENV('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = ENV('DEBUG')
ALLOWED_HOSTS = []
BASE_URL = ENV('BASE_URL')
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_extensions',
'users',
'commons',
'meals',
'tags',
'plates',
'menus',
'distributions',
'deliveries'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
}
# Authentication Settings
AUTH_USER_MODEL = 'users.User'
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': ENV.db()
}
DATABASES['default']['TEST'] = {
'NAME': 'nora_test'
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'es-cl'
TIME_ZONE = 'Etc/GMT+4'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
CSRF_USE_SESSIONS = True
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/home/'
LOGIN_URL = '/login/'
CSRF_COOKIE_SECURE = True
DATE_FORMAT = '%d/%m/%Y'
TIME_FORMAT = '%H:%M:%S'
SLACK_SERVICE_URL = 'https://hooks.slack.com/services/'
# CELERY CONFIGURATION
BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Etc/GMT+4'
CELERY_ALWAYS_EAGER = False
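# Example .env file consumed by ENV.read_env() above (all values are
# placeholders; ENV.db() parses DATABASE_URL into DATABASES['default']):
#   SECRET_KEY=change-me
#   DEBUG=True
#   BASE_URL=http://localhost:8000
#   DATABASE_URL=postgres://user:password@localhost:5432/nora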
|
[
"[email protected]"
] | |
2d6e5705b0d6fc9452a7eef4f715005355db0acf
|
0067290f8a2c5c367eee2e76f7ec743719d5b59c
|
/one/two/migrations/0002_auto_20170802_1924.py
|
02ba77ac66799d0a3867254c03ad5115c12deb5d
|
[] |
no_license
|
8880/Django
|
d81da8f410845676606eb148a609f56792a14b1b
|
469fe07475c2f7c6e2d1ba1e2119b59550f154e6
|
refs/heads/master
| 2021-01-16T17:54:58.393384 | 2017-08-19T02:55:11 | 2017-08-19T02:55:11 | 100,019,134 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,264 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-02 11:24
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('two', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(verbose_name='\u8bc4\u8bba\u5185\u5bb9')),
('username', models.CharField(blank=True, max_length=30, null=True, verbose_name='\u7528\u6237\u540d')),
('email', models.EmailField(blank=True, max_length=50, null=True, verbose_name='\u90ae\u7bb1\u5730\u5740')),
('url', models.URLField(blank=True, max_length=100, null=True, verbose_name='\u4e2a\u4eba\u7f51\u9875\u5730\u5740')),
('date_publish', models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
],
options={
'verbose_name': '\u8bc4\u8bba',
'verbose_name_plural': '\u8bc4\u8bba',
},
),
migrations.AlterModelOptions(
name='article',
options={'ordering': ['-id'], 'verbose_name': '\u6587\u7ae0', 'verbose_name_plural': '\u6587\u7ae0'},
),
migrations.AddField(
model_name='comment',
name='article',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='two.Article', verbose_name='\u6587\u7ae0'),
),
migrations.AddField(
model_name='comment',
name='pid',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='two.Comment', verbose_name='\u7236\u7ea7\u8bc4\u8bba'),
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u7528\u6237'),
),
]
|
[
"klous530.outlook.com"
] |
klous530.outlook.com
|
175d2147b052746aa4fb54eee2552c7003b10d43
|
8df7fa5a5669fab2ec25efea335ab5a8b90d8f76
|
/02-controls/03-ScreenGeometry.py
|
46475f77eb34bbe4fd16e471199d7a789e1df4d0
|
[] |
no_license
|
HackettHsu/PyQt5Tutorial
|
76b458b7c15a5707a7c6ef01e4c5cc5f48296ade
|
01c3f782e48742a57a1152ec114f8c8cec71c323
|
refs/heads/master
| 2022-09-21T00:39:27.710977 | 2020-06-01T06:53:29 | 2020-06-01T06:53:29 | 259,819,043 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,643 |
py
|
'''
Screen coordinate system
When laying out a UI, PyQt5 uses an X-Y coordinate system by default, with the
origin at the top-left corner (of both the screen and the window).
Client area: the region of the window that excludes the title bar.
Which of the query methods below include the title bar?
'''
import sys
from PyQt5.QtWidgets import QHBoxLayout, QMainWindow, QApplication, QPushButton, QWidget
# Print coordinate information when the button is clicked
def onClick_Button():
    print("Method 1:")
    print(f"widget.x() = {widget.x()}")  # window coordinates, including the title bar
    print(f"widget.y() = {widget.y()}")
    print(f"widget.width() = {widget.width()}")  # client-area size, excluding the title bar
    print(f"widget.height() = {widget.height()}")
    print("Method 2:")
    print(f"widget.geometry().x() = {widget.geometry().x()}")  # client-area coordinates, excluding the title bar
    print(f"widget.geometry().y() = {widget.geometry().y()}")
    print(f"widget.geometry().width() = {widget.geometry().width()}")
    print(f"widget.geometry().height() = {widget.geometry().height()}")
    print("Method 3:")
    print(f"widget.frameGeometry().x() = {widget.frameGeometry().x()}")  # window coordinates, including the title bar
    print(f"widget.frameGeometry().y() = {widget.frameGeometry().y()}")
    print(f"widget.frameGeometry().width() = {widget.frameGeometry().width()}")  # window size, including the title bar
    print(f"widget.frameGeometry().height() = {widget.frameGeometry().height()}")
app = QApplication(sys.argv)
widget = QWidget()
btn = QPushButton(widget)
btn.setText('Button')
btn.clicked.connect(onClick_Button)
btn.move(24, 52)
widget.resize(300, 240)  # sets the size of the client area
widget.move(250, 200)
widget.setWindowTitle("Screen Coordinates")
widget.show()
sys.exit(app.exec_())
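# Illustrative note: the gap between the two coordinate systems above gives the
# window-frame metrics, e.g. after widget.show():
#   title_bar_height = widget.geometry().y() - widget.frameGeometry().y()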
|
[
"[email protected]"
] | |
31a9922a9cadf18a73fa0c106cd377bfb6696751
|
08a68e32dc80f99a37a30ddbbf943337546cc3d5
|
/.history/count/urls_20200419191412.py
|
5d8ed271bfc7e1cb0268e075f7a2e8934d978eed
|
[] |
no_license
|
Space20001/word-count-project
|
dff1b4b44d2f7230070eef0d95dd968b655d92f7
|
795b5e8ad5c59109e96bf7a8e9192efaefa7770e
|
refs/heads/master
| 2022-04-20T17:54:05.511449 | 2020-04-20T15:25:46 | 2020-04-20T15:25:46 | 257,327,368 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 132 |
py
|
from django.urls import path
from . import views
urlpatterns = [
    path('', views.home),
    path('about/', views.about),
]
|
[
"[email protected]"
] | |
a4e3c2a78a101ae2c35ecf31315de44d777b253f
|
89cd8b77ad5171c336cc60b2133fe6468a6cb53f
|
/Module01_CZ/day7_data_struct_str/04-代码/day7/125_字符串高级操作(判断型).py
|
8f356536eabd5e44cafbc8624e413494095895a0
|
[
"MIT"
] |
permissive
|
fenglihanxiao/Python
|
75178f6b6b0c53345e1ed54226ea645216572d6c
|
872baf3a3a5ee42740161152605ca2b1ddf4cd30
|
refs/heads/master
| 2021-05-23T18:49:20.656433 | 2020-04-29T01:06:21 | 2020-04-29T01:06:21 | 253,199,073 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 272 |
py
|
"""
演示字符串判断型操作
"""
# str1 = "\n"
# print(str1.islower())
# print(str1.isupper())
name = "张三丰"
print(name.startswith("张三"))
filename="1.jpge"
if filename.endswith(".jpg") or filename.endswith(".png") :
print("该文件是一个图片")
|
[
"[email protected]"
] | |
000e1259ee1af55de6f4cf1e16088578c95cd082
|
b3dec5c9347ac443ed36dfd759e6bb58550dd239
|
/functional_tests/test_layout_and_styling.py
|
670b19b27d17eb830593f591479d2f2b103df28e
|
[] |
no_license
|
StevenSLXie/Ask-and-answer
|
a6587ae8424063bfe4f46fa0c3cac7d415237a12
|
778d580f53dfc1b187a95ad41ed504ab79205541
|
refs/heads/master
| 2021-01-01T05:53:53.644232 | 2014-01-07T08:45:49 | 2014-01-07T08:45:49 | 15,699,362 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 743 |
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from django.test import LiveServerTestCase
import unittest
import time
from unittest import skip
from .base import FunctionalTest
class LayoutAndStylingTest(FunctionalTest):
def test_layout_and_styling(self):
# Edith goes to the home page
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
        # She notices the input box is nicely centered
        inputbox = self.browser.find_element_by_tag_name('input')
        self.assertAlmostEqual(
            inputbox.location['x'] + inputbox.size['width'] / 2,
            512,
            delta=3
        )
        inputbox.send_keys('testing\n')
|
[
"[email protected]"
] | |
b9ee4247f3e4f57c0fe3eb2d7c715b195da8c830
|
962dca0c4f20b45a122f17310acf12057ca866d2
|
/old_version/models.py
|
5e859c2eff46a3168e9a7f3f7c45f5696f459216
|
[] |
no_license
|
wyuten/3ch
|
c78e2a5cf0b65813302e84bf12dd60a6dccdb633
|
23c73d907bc36a59907dcc0845dea131fa27dcde
|
refs/heads/master
| 2023-04-06T10:59:39.598242 | 2019-04-13T11:54:35 | 2019-04-13T11:54:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 754 |
py
|
# -*- coding: utf-8 -*-
from app import db
from datetime import datetime
class Article(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
title = db.Column(db.String(256), nullable=False)
content = db.Column(db.String(30000), nullable=False)
    # Pass the callable (not its result) so the timestamp is evaluated per row
    time = db.Column(db.DateTime, default=datetime.now)
class Comment(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
content = db.Column(db.String(3000), nullable=False)
article_id = db.Column(
db.Integer,
db.ForeignKey('article.id'),
nullable=False,
index=True
)
article = db.relationship(Article, foreign_keys=[article_id, ])
|
[
"[email protected]"
] | |
5730ac87d69f4f8cfbe150ec06158ae54a7943a1
|
c557bd8a74d2d4936aeae477fb37be374e904853
|
/If_No_council.py
|
290032b09aef70b7d1a5e950c1b03ff4bd02461d
|
[] |
no_license
|
shalom-k/Hackathon_team2
|
80a6d4f30b89894bb353c3218e24ec2c4d5b7600
|
e1855ad7a68cb613ecece703882d58d024aee215
|
refs/heads/main
| 2023-03-22T02:55:02.787292 | 2021-03-13T23:25:04 | 2021-03-13T23:25:04 | 346,830,380 | 0 | 1 | null | 2021-03-13T22:46:52 | 2021-03-11T20:36:25 |
Python
|
UTF-8
|
Python
| false | false | 1,366 |
py
|
import requests
import json
# Find the distance between two points via the Distance Matrix API
def distance(start, fin):
api_key = 'AIzaSyBbwM-62klXAknNAhMWEZ-MVlpfUFYFYko'
url_distance = 'https://maps.googleapis.com/maps/api/distancematrix/json?'
req = requests.get(url_distance + 'origins=' + start +
'&destinations=' + fin + '&key=' + api_key)
values = req.json()
return("%s: %s" % (values['destination_addresses'][0], values['rows'][0]['elements'][0]['distance']['text']))
def nearby_locations(place):
api_key = 'AIzaSyBbwM-62klXAknNAhMWEZ-MVlpfUFYFYko'
url_place = "https://maps.googleapis.com/maps/api/place/textsearch/json?"
r = requests.get(url_place + 'query=' + place + '&key=' + api_key)
x = r.json()
return(x['results'])
def dist(start, place):
location_array = []
data = nearby_locations(place)
for i in range(len(data)):
fin = data[i]['formatted_address']
location_array.append(distance(start, fin))
return(location_array)
# dist() returns the distance to each matching location as an array of strings.
if __name__ == "__main__":
start = input("enter the start location boss :)")
place = input("Enter a place boss :)")
location_array = dist(start, place)
for i in range(len(location_array)):
print(location_array[i])
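# Hardening sketch (environment variable name is an assumption): rather than
# hard-coding api_key, read it from the environment:
#   import os
#   api_key = os.environ["GOOGLE_MAPS_API_KEY"]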
|
[
"[email protected]"
] | |
a693789de45dbd41820033a6363a8ffeb13c7b85
|
662788b9affd962f8d04d337fe65a418ca5ff209
|
/eden/tutorialKivy/tk012_hexEdit_appUsingModules/aboutDialog.py
|
260a7a900713e8c436c56f2107f454c9e03dec85
|
[] |
no_license
|
xgid/Eden
|
0d4910d27da24594d53a221d80313fb7f0e27788
|
b2e062fd22481bf26bfc7cc78bf2ec0ba875ddfb
|
refs/heads/master
| 2020-03-31T10:04:24.157325 | 2015-01-24T17:46:54 | 2015-01-24T17:46:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 984 |
py
|
# Copyright (C) 2005 - 2014 Jacques de Hooge, Geatec Engineering
#
# This program is free software.
# You can use, redistribute and/or modify it, but only under the terms stated in the QQuickLicence.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the QQuickLicence for details.
# aboutDialog.py
from org.qquick.eden import *
class AboutDialog (Module):
def __init__ (self):
Module.__init__ (self)
def defineNodes (self):
self.addNode (Node (None), 'openNode')
self.addNode (Node (None), 'closeNode')
def defineViews (self):
return ModalView (
ButtonView (captionNode = 'Eden modules demo app\nPress to dismiss', actionNode = self.closeNode),
captionNode = 'About',
closeNode = self.closeNode,
relativeSize = (0.2, 0.3)
)
def defineActions (self):
self.openNode.action = self.getView () .execute
|
[
"[email protected]"
] | |
7b89bfc599a8c522870a3004d962c83174b573a2
|
b980b16022f9661c8fe25d35ffdccdc2bc5ad193
|
/json_fingerprint/_create.py
|
b5e4a7366a9a48c4a958a9fd5740cc37eb729193
|
[
"MIT"
] |
permissive
|
cobaltine/json-fingerprint
|
520456c74faa2ed2a7db2a8bc3b27af2335452a5
|
8face4a37e6db8f5a52727d721dfc80a4c3b031a
|
refs/heads/main
| 2023-05-25T03:43:39.390401 | 2023-05-22T18:03:07 | 2023-05-22T18:10:28 | 322,092,977 | 5 | 2 |
MIT
| 2023-05-22T18:10:29 | 2020-12-16T20:21:59 |
Python
|
UTF-8
|
Python
| false | false | 1,089 |
py
|
from ._jfpv1 import _create_jfpv1_fingerprint
from ._load_json import _load_json
from ._validators import (
_validate_hash_function,
_validate_input_type,
_validate_version,
)
def create(input: str, hash_function: str, version: int) -> str:
"""Create JSON fingerprints with the selected hash function and JSON fingerprint algorithm version.
Args:
input (str):
JSON input in string format.
hash_function (str):
One of the supported hash function names in string format (options: "sha256", "sha384", or "sha512").
version (int):
An integer indicating the JSON fingerprint algorithm version to be used (options: 1).
Returns:
str: A pre-formatted JSON fingerprint (example: "jfpv1${hash_function_name}${hash_hex_digest}").
"""
_validate_version(version=version)
_validate_input_type(input=input)
_validate_hash_function(hash_function=hash_function, version=version)
loaded = _load_json(data=input)
return _create_jfpv1_fingerprint(data=loaded, hash_function=hash_function)
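# Usage sketch (digest abbreviated):
#   fp = create(input='{"a": 1, "b": 2}', hash_function="sha256", version=1)
#   # fp == 'jfpv1$sha256$...'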
|
[
"[email protected]"
] | |
a268ef38a2861b114ef4f65c5e31730ade40cc92
|
7f68bbb3fd328a4d6bbabecb44305987d8cbbfc4
|
/django/django-intro/home/workspace/PROJECT8/movies/forms.py
|
96b211b33850d9d51473be7e05a26ff57cb8c511
|
[] |
no_license
|
seunghoon2334/TIL
|
c84f9f9e68c8ccc7a1625222fe61f40739774730
|
51cfbad2d9b80a37b359716fca561c2a5c5b48b3
|
refs/heads/master
| 2022-12-18T18:20:19.210587 | 2019-11-26T03:14:23 | 2019-11-26T03:14:23 | 162,101,369 | 0 | 0 | null | 2022-11-22T03:59:16 | 2018-12-17T08:51:53 |
C
|
UTF-8
|
Python
| false | false | 491 |
py
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from .models import Movie
# modelform
class MovieForm(forms.ModelForm):
class Meta:
model = Movie
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'POST'
self.helper.add_input(Submit('Submit', '제출!'))
|
[
"[email protected]"
] | |
d313ac27c05892907d934359fa7a177b2f5f2fff
|
633944f913050debf0764c2a29cf3e88f912670e
|
/v8/depot_tools/bootstrap-3.8.0b1.chromium.1_bin/python3/lib/python3.8/site-packages/pip/_internal/vcs/__init__.py
|
4b25ec2e4255710878140a71bd637c31b9cea887
|
[
"BSD-3-Clause",
"bzip2-1.0.6",
"SunPro",
"Apache-2.0"
] |
permissive
|
bopopescu/V8-lgtm
|
0474c2ff39baf754f556ef57619ceae93e7320fd
|
da307e2f7abfca5fa0e860a809de6cd07fd1b72b
|
refs/heads/master
| 2022-02-16T19:10:54.008520 | 2019-09-25T07:51:13 | 2019-09-25T07:51:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 100 |
py
|
../../../../../../../.cipd/pkgs/2/_current/lib/python3.8/site-packages/pip/_internal/vcs/__init__.py
|
[
"[email protected]"
] | |
8bf94d64f8dd66a1fa69e2c51090c36a8828b416
|
729ce984b65b68f2da2045cafe0fbc169692af15
|
/string_2/count_hi.py
|
dc41d8786789ae4cd4c967eb5b7c203bd4051f15
|
[] |
no_license
|
imvivek71/CodingBat-Python-Coding
|
00b1f254c30c423405500e18648e52e59878db9d
|
684d432bb5b0a470ab1a86fd4758673b39a16cca
|
refs/heads/master
| 2020-04-15T06:39:52.218845 | 2019-02-04T09:08:03 | 2019-02-04T09:08:03 | 164,468,362 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 365 |
py
|
"""""
Return the number of times that the string "hi" appears anywhere in the given string.
count_hi('abc hi ho') → 1
count_hi('ABChi hi') → 2
count_hi('hihi') → 2
"""
def count_hi(s):
    count = 0
    for i in range(1, len(s)):
        if s[i] == 'i' and s[i - 1] == 'h':
            count += 1
    return count
|
[
"[email protected]"
] | |
0519b9a6c3d736fd51361e9def7cf66c291409c5
|
915ac708aeac53125f29bef90c2c047eaed4940e
|
/Anaconda/Scripts/rst2xetex.py
|
2d9179a588e56dbef11208ccd0ed3621286f9cc3
|
[] |
no_license
|
bopopescu/newGitTest
|
c8c480ddd585ef416a5ccb63cbc43e3019f92534
|
5a19f7d01d417a34170a8f760a76e6a8bb7c9274
|
refs/heads/master
| 2021-05-31T17:00:26.656450 | 2016-06-08T06:43:52 | 2016-06-08T06:43:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 795 |
py
|
#!C:\aroot\stage\python.exe
# $Id: rst2xetex.py 7038 2011-05-19 09:12:02Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing XeLaTeX source code.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates XeLaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='xetex', description=description)
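# Example invocation (file names are illustrative):
#   rst2xetex.py document.rst document.tex
# and then compile document.tex with xelatex.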
|
[
"[email protected]"
] | |
b4c40afde973ad9d8f36c83127803a0977c3e0ac
|
6cc958a57fd727ae66bad61548d4a21d023e6058
|
/src/util/util_unit.py
|
44b3790514651d2f66dbc0ac31356ba366102b6c
|
[
"MIT"
] |
permissive
|
oygx210/hpr-sim
|
7135e1cba0a868f24eda6c22294a715c41fce2e5
|
4b7249f64c3c699fb863855eb8f1963b4d06e32e
|
refs/heads/master
| 2023-04-13T01:43:38.068845 | 2021-04-27T02:12:34 | 2021-04-27T02:12:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,363 |
py
|
'''
High Power Rocketry - Flight Simulation
MIT License
Copyright (c) 2019 Roshan Doddanavar
https://rdoddanavar.github.io
Path:
hpr-sim/src/util/util_unit.py
Created:
2019-05-25
Type:
Python3 module
Description:
Provides unit conversion utilities.
Functions:
config
convert
Classes:
None
Dependencies:
hpr-sim/src/util/util_yaml
/config_unit.yaml
'''
# System modules
from pathlib import Path
# Project modules
import util_yaml
# Module variables
unitDict = None
def config():
'''
Parses YAML config file, creates global dict of unit conversion factors.
Input(s): <none> \n
Outputs(s): <none>
'''
global unitDict # Necessary for reassignment
if not unitDict:
configPath = Path(__file__).parent / "../../config/config_unit.yaml"
configPath = str(configPath.resolve())
unitDict = util_yaml.load(configPath)
def convert(*args):
'''
Converts input relative to default unit, or between two units.
Input(s): value (float), quantity (str), unitA (str), unitB (str) [opt.] \n
Output(s): value (float)
'''
value = args[0]
quantity = args[1]
unitA = args[2]
if len(args) == 3:
if quantity and unitA:
if quantity == "temperature":
value = convert_temp(value, unitA)
else:
# Need error handling here for bad key
factorA = unitDict[quantity][unitA]
# Evaluate arithmetic operations, if necessary
factorA = util_yaml.math_eval(str(factorA))
value *= factorA
elif len(args) == 4:
unitB = args[3]
if (quantity and unitA and unitB):
if quantity == "temperature":
value = convert_temp(value, unitA, unitB)
else:
# Need error handling here for bad key
factorA = unitDict[quantity][unitA]
factorB = unitDict[quantity][unitB]
# Evaluate arithmetic operations, if necessary
factorA = util_yaml.math_eval(str(factorA))
factorB = util_yaml.math_eval(str(factorB))
factorC = factorA/factorB
value *= factorC
# Original value returned if unit is not specified or nondimensional
return value
def convert_temp(*args):
'''
Converts temperature relative to default unit (K), or between two units.
Input(s): value (float), unitA (str), unitB (str) [opt.] \n
Output(s): value (float)
'''
value = args[0]
quantity = "temperature"
unitA = args[1]
factorA = unitDict[quantity][unitA][0]
offsetA = unitDict[quantity][unitA][1]
factorA = util_yaml.math_eval(str(factorA))
offsetA = util_yaml.math_eval(str(offsetA))
value = value*factorA + offsetA
if len(args) == 3:
unitB = args[2]
factorB = unitDict[quantity][unitB][0]
offsetB = unitDict[quantity][unitB][1]
factorB = util_yaml.math_eval(str(factorB))
offsetB = util_yaml.math_eval(str(offsetB))
value = (value - offsetB)/factorB
return value
if __name__ == "__main__":
# Standalone execution
pass
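    # Usage sketch; the quantity and unit names are assumptions that depend on
    # config_unit.yaml:
    #   config()
    #   metres = convert(10.0, "length", "ft")      # ft -> default unit
    #   feet = convert(10.0, "length", "m", "ft")   # m -> ft
    #   kelvin = convert_temp(25.0, "C")            # C -> K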
|
[
"[email protected]"
] | |
9214d8fa1656c1be4d2bc90c80540115d23dcb3f
|
09717c5335b85838f31cfdb464387378be8ba104
|
/toy_test/data_util.py
|
829db87c67a8a814e9ba0dda22a86da0d7458706
|
[
"MIT"
] |
permissive
|
whikwon/seq2seq-translation
|
bd6644f9f03e8debb16085f57ad99c0e1d7fa447
|
6858a5270d6c6bf034fdba27600d3d82090b3874
|
refs/heads/master
| 2021-05-11T12:13:38.442871 | 2018-01-16T11:04:44 | 2018-01-16T11:04:44 | 117,653,885 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,649 |
py
|
import numpy as np
import tensorflow as tf
import os
PAD = 0
UNK = 1
GO = 2
EOS = 3
start_token = GO
end_token = EOS
def read_file(path):
"""Read source from text file"""
input_file = os.path.join(path)
with open(input_file, "r", encoding='utf-8', errors='ignore') as f:
source_sentences = f.read()
return source_sentences
def load_data(path):
"""Read source from text file and train/validation split"""
source_sentences = read_file(path)
vocab = make_vocab(source_sentences)
source_letter_ids = [[vocab.get(letter, vocab['<UNK>']) for letter in line] \
for line in source_sentences.split('\n')]
num_sentences = len(source_letter_ids)
train_val_split = int(num_sentences * 0.8)
train_source = source_letter_ids[:train_val_split]
    train_target = [list(reversed(i)) + [EOS] for i in train_source]
valid_source = source_letter_ids[train_val_split:]
    valid_target = [list(reversed(i)) + [EOS] for i in valid_source]
return train_source, train_target, valid_source, valid_target
def make_vocab(data):
"""Make vocab from source"""
special_words = ['<PAD>', '<UNK>', '<GO>', '<EOS>']
set_words = set([character for line in data.split('\n') for character in line])
int_to_vocab = {word_i: word for word_i, word in enumerate(special_words + list(set_words))}
vocab_to_int = {word: word_i for word_i, word in int_to_vocab.items()}
return vocab_to_int
def pad_sentence_batch(sentence_batch, pad_int):
"""Pad sentences with <PAD> so that each sentence of a batch has the same length"""
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]
def get_batches(sources, targets, num_epochs, batch_size):
"""Return batch to feed into the model."""
for i_epoch in range(num_epochs):
for batch_i in range(0, len(sources) // batch_size):
start_i = batch_i * batch_size
sources_batch = sources[start_i:start_i + batch_size]
targets_batch = targets[start_i:start_i + batch_size]
pad_sources_batch = np.array(pad_sentence_batch(sources_batch, PAD))
pad_targets_batch = np.array(pad_sentence_batch(targets_batch, PAD))
# Need the lengths for the _lengths parameters
pad_targets_lengths = []
for target in pad_targets_batch:
pad_targets_lengths.append(len(target))
pad_source_lengths = []
for source in pad_sources_batch:
pad_source_lengths.append(len(source))
yield pad_sources_batch, np.array(pad_source_lengths), pad_targets_batch, np.array(pad_targets_lengths)
def process_decoder_input(target_data, vocab_to_int, batch_size):
    '''Remove the last word id from each batch and concat the <GO> to the beginning of each batch'''
ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
dec_input = tf.concat([tf.fill([batch_size, 1], vocab_to_int['<GO>']), ending], 1)
return dec_input
def _get_user_input():
""" Get user's input, which will be transformed into encoder input later """
print("> ", end="")
return input()
def source2id(vocab, text):
"""Convert a source to ids"""
sequence_length = 7
return [vocab.get(word, vocab['<UNK>']) for word in text] \
+ [vocab['<PAD>']] * (sequence_length - len(text))
def id2source(vocab, seq):
"""Convert ids to a source"""
reversed_vocab = {j: i for i, j in vocab.items()}
return ''.join([reversed_vocab[i] for i in seq])
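# Usage sketch (assumes a newline-separated text corpus at the given path):
#   train_src, train_tgt, val_src, val_tgt = load_data('data/letters.txt')
#   for src_batch, src_lens, tgt_batch, tgt_lens in get_batches(
#           train_src, train_tgt, num_epochs=1, batch_size=128):
#       ...  # feed each padded batch into the seq2seq model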
|
[
"[email protected]"
] | |
48f6fab3b18bb1659f37d45e12c7ea01398ed32a
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_bunts.py
|
41d450a12d291732d8830616446e29d1957fe2d2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 214 |
py
|
# class header
class _BUNTS():
    def __init__(self):
        self.name = "BUNTS"
        self.definitions = ['bunt']
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['bunt']
|
[
"[email protected]"
] | |
e569fc7fc6e893e1d228b1d7e4971dcb65008fb8
|
45cc3880f3444276cebb0a7f91d3b146cd27b9d0
|
/beeprint/printer.py
|
63fe4de1510c7695ba066e8687e34780d93a7b3e
|
[] |
no_license
|
aijikl/beeprint
|
056aa84ff73da93c50143c83bed0fdf54bd37ee5
|
0380a942c0ad56ab219a51c728b4244a9b49f405
|
refs/heads/master
| 2021-01-20T04:25:26.858124 | 2017-04-04T06:50:36 | 2017-04-04T06:50:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,603 |
py
|
# -*- coding:utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import sys
import traceback
import types
import inspect
from io import StringIO
from .utils import pyv
if pyv == 2:
    # avoid throwing [UnicodeEncodeError: 'ascii' codec can't encode characters]
    # exceptions; without these lines, sys.getdefaultencoding() returns ascii
from imp import reload
reload(sys)
sys.setdefaultencoding('utf-8')
from . import constants as C
from .utils import print_exc_plus
from .models.block import Block, Context
from .config import Config
from .debug_kit import print_obj_path
def pp(o, output=True, max_depth=5, indent=2, width=80, sort_keys=True, config=None, **kwargs):
"""print data beautifully
"""
if config:
config = config.clone()
else:
config = Config()
assert max_depth > 0
config.max_depth = max_depth
assert indent > 0
config.indent_char = u' '*indent
assert width >= 0
config.string_break_width = width
config.dict_ordered_key_enable = bool(sort_keys)
for k, v in kwargs.items():
        if hasattr(config, k):
setattr(config, k, v)
if not output:
config.stream = None
try:
res = str(Block(config, Context(obj=o)))
except:
print_obj_path()
raise
if config.debug_level != 0:
if config.debug_delay:
print(config.debug_stream.getvalue())
if not output:
return res
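# Usage sketch:
#   pp({'name': 'beeprint', 'tags': ['pretty', 'print']})  # prints to stdout
#   s = pp({'name': 'beeprint'}, output=False)             # returns the string instead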
|
[
"[email protected]"
] | |
8bbbaf502c9a337965728c2e82e58a6959f1178e
|
09a076a0e5bc81ccd5a5bc472e817326e4540c09
|
/cycling/polls/mercado/mercado/middleware.py
|
1e23016687ff389288fd00180007cb6a6799dc98
|
[] |
no_license
|
martinnogueira/django22.7tutoriales
|
f5bffb11491169fb5cc0f934270033be2eed2b30
|
2e2b1e47c8d2f5cbf15ff3616eb7e470822b3b4b
|
refs/heads/master
| 2020-03-23T18:59:55.368029 | 2018-07-25T23:19:36 | 2018-07-25T23:19:36 | 141,947,323 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,713 |
py
|
from scrapy import signals
class MercadoSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
    def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
    def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
    def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
    def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
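# To enable this middleware, reference it from the project settings, e.g.:
#   SPIDER_MIDDLEWARES = {
#       'mercado.middleware.MercadoSpiderMiddleware': 543,
#   }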
|
[
"[email protected]"
] | |
fcaf8123dd2fd421f5fc4ee011401898730fd1c1
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/11114095.py
|
04981cbb389888968150d038dc6a792df1581176
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,637 |
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/11114095.py generated: Wed, 25 Jan 2017 15:25:18
#
# Event Type: 11114095
#
# ASCII decay Descriptor: [B0 -> K+ pi- (Higgs0 -> mu+ mu-)]cc
#
from Configurables import Generation
Generation().EventType = 11114095
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_KpiDarkBoson2MuMu,m=250MeV,t=100ps,DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
from Gauss.Configuration import *
from Configurables import LHCb__ParticlePropertySvc as ParticlePropertySvc
from Configurables import Gauss, PrintMCTree, PrintMCDecayTreeTool, HistogramPersistencySvc, NTupleSvc, DumpHepMCDecay, DumpHepMCTree, GaussMonitor__CheckLifeTimeHepMC, GaussMonitor__CheckLifeTimeMC, GiGa, GiGaPhysListModular, GiGaHiggsParticles, GenerationToSimulation, PythiaProduction
ParticlePropertySvc().Particles = [ "H_10 87 25 0.0 0.250 1.0000e-10 Higgs0 25 0.000000e+000" ]
ApplicationMgr().ExtSvc += [ ParticlePropertySvc() ]
gigaHiggsPart = GiGaHiggsParticles()
gigaHiggsPart.Higgses = ["H_10"] # H_10, H_20, H_30
GiGaPhysListModular("ModularPL").PhysicsConstructors += [ gigaHiggsPart ]#
|
[
"[email protected]"
] | |
8ab66f039bfb3d4689e240f5275c04e6cab0d10e
|
3b07a0c65c20e512a7578efe76080b277f70da23
|
/packages/clear.py
|
53c2a60d22cca6bf6b5881ce34e51a2b800d5ecc
|
[] |
no_license
|
FuryAdcom/PPython_MiniProyecto1
|
3df19c9fdaa43227b6df8443013361309e2d7aff
|
9f91af84993e80c9c49c0424a17623f1941fa1ee
|
refs/heads/main
| 2023-01-12T13:28:32.547138 | 2020-11-12T13:45:50 | 2020-11-12T13:45:50 | 311,410,120 | 0 | 0 | null | 2020-11-12T13:45:51 | 2020-11-09T17:13:07 |
Python
|
UTF-8
|
Python
| false | false | 132 |
py
|
from os import system, name
def clear():
if name == 'nt':
_ = system('cls')
else:
_ = system('clear')
|
[
"[email protected]"
] | |
5c18325f09c726223108db74bfcdff8194bd5abf
|
0709374e10878df28c7f5359b14ea7eaaac8c5f1
|
/train_models.py
|
d36f050510cbc73bb5c2ef72f7ba120873c6f9c2
|
[] |
no_license
|
rhawiz/dingocv-api
|
c8af7e46df4ba833c71dc64e0321f463b6b1756f
|
d1197d120e8ce27b73903e53a5f76537c7228696
|
refs/heads/master
| 2021-01-16T18:49:30.878196 | 2017-08-14T20:20:37 | 2017-08-14T20:20:37 | 100,119,769 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 307 |
py
|
import os
from app.core.train import AutocompleteTrainer
DATABASE_PATH = os.getenv("DATABASE_PATH", "dingocv_phrases.sqlite")
MODELS_PATH = os.getenv("MODELS_PATH", 'models')
trainer = AutocompleteTrainer(save_dir=os.path.abspath(MODELS_PATH), sqlite_path=os.path.abspath(DATABASE_PATH))
trainer.train()
|
[
"[email protected]"
] | |
058130f56ac7ae3b6cae1b4806a62a3b69fc3b90
|
6adf38f23f24a643d3cacd8daf1eb6e008e2f826
|
/spec_test_2.py
|
3c8ae0abc983f7d46f75c71d9925cba762a0ebf1
|
[] |
no_license
|
nschaffin/Spectrometer-Test-Code
|
a7f08a59ca7f5c50a9954095bdab4b7a468f9107
|
7bf65af534c1920f667defe1aae08a524e49ffe2
|
refs/heads/master
| 2022-11-27T05:53:44.709052 | 2020-08-10T17:38:52 | 2020-08-10T17:38:52 | 278,254,687 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,218 |
py
|
import seabreeze
import seabreeze.spectrometers
from seabreeze.spectrometers import Spectrometer
import time
'''
Meant to be run in interactive mode to catch errors
'''
spec1 = ''
# Step 1 and step 4
def test_spectrometer():
global spec1
# Test from_first_available
first_spec = seabreeze.spectrometers.Spectrometer.from_first_available()
print(f"The first available device is: {first_spec}")
# Test list_devices
spec_list = seabreeze.spectrometers.list_devices()
if spec_list == []:
print("ERROR: No spectrometers listed.")
else:
spec1 = seabreeze.spectrometers.Spectrometer(spec_list[0])
print(f"The devices listed are: {spec_list}. The spectrometer selected is: {spec1}")
# Compare the results of both spectrometers
if first_spec == spec1:
print("list_devices and from_first_available give the same spectrometer")
else:
print(f'first spec = {first_spec}, spec1 = {spec1}')
# try:
# spec1.integration_time_micros(5000)
# time.sleep(1)
# except:
# spec1 = first_spec
# print("\nChanged spectrometer\n")
print(f'spec1 = {spec1}')
# Test integrating when it's disconnected but the spectrometers are still listed
spec1.integration_time_micros(5000) # insert shortest integration time here
wavelengths = spec1.wavelengths()
print(f"Wavelengths: {wavelengths}")
print("\n")
def test():
spec1.integration_time_micros(5000)
spec1.integration_time_micros(5000)
spec1.integration_time_micros(5000)
# Step 2 and 3
def check_spectrometer():
global spec1
# Test list_devices
spec_list = seabreeze.spectrometers.list_devices()
if spec_list == []:
print("No spectrometers listed.")
else:
print(f"The devices listed are: {spec_list}.")
# Test integrating when it's disconnected but the spectrometers are still listed
spec1.integration_time_micros(5000) # insert shortest integration time here
wavelengths = spec1.wavelengths()
print(f"Wavelengths: {wavelengths}")
print("\n")
"""
Connect:
check devices
connect to spec
check if both specs are the same thing
run a command
Disconnected:
check devices
run a command
Reconnect:
check devices
run a command
Reconnect retry:
check devices
connect to spec
check if both specs are the same thing
run a command
"""
|
[
"[email protected]"
] | |
56fcd9d7569cd87ba0cc217a1be8e88301bac6f5
|
361ac3fcf36d80c792b60b7e2284cb1dc8d77944
|
/osa03-16_sanojen_ensimmaiset_kirjaimet/test/test_sanojen_ensimmaiset_kirjaimet.py
|
bd5cdde1c62e7f1ca35d82db216518e44c552e43
|
[] |
no_license
|
darkismus/mooc-ohjelmointi-21
|
48cc20391db4240104549d4f3834a67c77976f6d
|
5f72dd9cff78704a2a0f5bc1cc18c7740ce50c51
|
refs/heads/main
| 2023-08-01T03:35:13.244978 | 2021-09-14T10:49:37 | 2021-09-14T10:49:37 | 368,469,947 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,507 |
py
|
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load_module, reload_module, get_stdout
from functools import reduce
exercise = 'src.sanojen_ensimmaiset_kirjaimet'
def outputs_equal(str1 : str, str2 : str) -> bool:
return str1.lower() == str2.lower()
def get_correct(s : str) -> str:
return "\n".join([x[0] for x in s.split()])
@points('3.sanojen_ensimmaiset_kirjaimet')
class SanojenEnsimmaisetKirjaimetTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with patch('builtins.input', return_value = "x"):
cls.module = load_module(exercise, 'fi')
def test_lyhyet_lauseet(self):
words = ["Heipparallaa", "Terve kaikille", "Moi vaan kaikille", "Simsalabim, sanoi taikuri",
"Mitäpä tässä hötkyilemään", "Vielä yksi testilause tässä"]
for testcase in words:
with patch('builtins.input', return_value = testcase):
try:
reload_module(self.module)
except:
self.assertFalse(True, f"varmista että ohjelmasti toimii syötteellä\n{testcase}")
output_all = get_stdout()
output = [x.strip() for x in output_all.split("\n") if len(x.strip()) > 0]
correct = get_correct(testcase)
len_correct = len(correct.split("\n"))
                self.assertFalse(len(output_all)==0, "Your program prints nothing with the input " + testcase)
                self.assertTrue(len(output) == len_correct, "With the input ({}) your program should print {} lines but prints {} lines: \n{}".
                    format(testcase, len_correct, len(output), output_all))
                self.assertTrue(outputs_equal(output_all, correct),
                    "Your program's output\n{}\ndoes not match the expected output \n{} \nwith the input ({})".
                    format(output_all, correct, testcase))
def test_pidemmat_lauseet(self):
words = ["Mitäpä tässä turhia jaarittelemaan, vaan jaarittelenpa tovin sittenkin.",
"Tässäpä vähän pidempi testilause: nähdään samantien miten hyvin ohjelma toimii",
"Otetaanpa vielä yksi testi tähän loppuun: tässä lauseessa onkin aika paljon sanoja."]
for testcase in words:
with patch('builtins.input', return_value = testcase):
try:
reload_module(self.module)
except:
self.assertFalse(True, f"varmista että ohjelmasti toimii syötteellä\n{testcase}")
output_all = get_stdout()
output = [x.strip() for x in output_all.split("\n") if len(x.strip()) > 0]
correct = get_correct(testcase)
len_correct = len(correct.split("\n"))
                self.assertFalse(len(output_all)==0, "Your program prints nothing with the input " + testcase)
                self.assertTrue(len(output) == len_correct, "With the input ({}) your program should print {} lines but prints {} lines: \n{}".
                    format(testcase, len_correct, len(output), output_all))
                self.assertTrue(outputs_equal(output_all, correct),
                    "Your program's output\n{}\ndoes not match the expected output \n{} \nwith the input ({})".
                    format(output_all, correct, testcase))
if __name__ == '__main__':
unittest.main()
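# Worked example of the transformation the tests verify -- the first letter
# of each word of the input, one per line:
#
#   get_correct("Moi vaan kaikille")  ->  "M\nv\nk"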
|
[
"[email protected]"
] | |
2f9bdc29452a2db1226f3a1ca5aab9fbdac5e5d6
|
6d0364f7aca2ea76444299d84d467a55b8dfabde
|
/tests/toranj/test-100-mcu-power-state.py
|
4cba5896a9f0a28e51ca54be48ff0350a5037d02
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause"
] |
permissive
|
particle-iot/openthread
|
b862853867a75a591bcb3dae8f70c2ac9c35eaba
|
668256290d1c48319b0b96d41559efb48dcc0821
|
refs/heads/master
| 2020-03-13T13:02:45.358306 | 2019-07-09T11:38:23 | 2019-07-09T11:38:23 | 131,131,221 | 1 | 0 |
BSD-3-Clause
| 2019-05-19T03:42:57 | 2018-04-26T09:19:12 |
C++
|
UTF-8
|
Python
| false | false | 9,901 |
py
|
#!/usr/bin/env python
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import time
import wpan
from wpan import verify
#-----------------------------------------------------------------------------------------------------------------------
# Test description: Testing controlling of NCP's MCU power state
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print '-' * 120
print 'Starting \'{}\''.format(test_name)
#-----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
node = wpan.Node()
#-----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
#-----------------------------------------------------------------------------------------------------------------------
# Test implementation
# Verify that state is ON after a reset
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Check power state wpantund property get and set
WAIT_TIME = 5
def check_wpan_is_in_offline_state():
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_OFFLINE)
def check_wpan_is_in_deep_sleep_state():
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_DEEP_SLEEP)
def check_wpan_is_in_commissioned_state():
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_COMMISSIONED)
def check_wpan_is_in_associated_state():
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_ASSOCIATED)
def check_wpan_is_in_associating_state():
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_ASSOCIATING)
node.form("mcu-power-state")
verify(node.is_associated())
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, 'low-power')
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_ASSOCIATED)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, 'on')
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, 'lp') # special short-form string for low-power
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_ON)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_LOW_POWER)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_ASSOCIATED)
# Verify that `wpantund` will restore the user-set value after NCP reset
node.reset()
time.sleep(1)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_ON)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Check the `wpantund` state changes between "deep-sleep" and "offline"
node.leave()
verify(not node.is_associated())
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_OFFLINE)
# Setting the power state to `low-power` should change wpantund state to `DEEP_SLEEP`
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_LOW_POWER)
wpan.verify_within(check_wpan_is_in_deep_sleep_state, WAIT_TIME)
# Verify that reading/getting a property does not impact the wpantund state.
node.get(wpan.WPAN_THREAD_RLOC16)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_DEEP_SLEEP)
# Setting the power state to `on` should change wpantund state to `OFFLINE`
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_ON)
wpan.verify_within(check_wpan_is_in_offline_state, WAIT_TIME)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify the behavior of `begin-low-power` wpanctl command
node.wpanctl('begin-low-power')
wpan.verify_within(check_wpan_is_in_deep_sleep_state, WAIT_TIME)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_ON)
wpan.verify_within(check_wpan_is_in_offline_state, WAIT_TIME)
# Check the `wpantund` state changes between "offline:commissioned" and "deep-sleep"
node.form("test-network")
node.set('Daemon:AutoAssociateAfterReset','0')
# Verify that issuing a `begin-low-power` when in "associated" state
# does not change the state.
node.wpanctl('begin-low-power')
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_ASSOCIATED)
# After reset, power state should remain `LOW_POWER` (wpantund would restore the value
# on NCP) and since "AutoAssociateAfterReset" is disabled, wpantund state should
# be `DEEP_SLEEP`.
node.reset()
wpan.verify_within(check_wpan_is_in_deep_sleep_state, WAIT_TIME)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_ON)
wpan.verify_within(check_wpan_is_in_commissioned_state, WAIT_TIME)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_LOW_POWER)
wpan.verify_within(check_wpan_is_in_deep_sleep_state, WAIT_TIME)
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_ON)
node.leave()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify sleep behavior after disabling `wpantund` ("Daemon:Enabled" property) when state is "offline"
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
verify(node.get(wpan.WPAN_STATE) == wpan.STATE_OFFLINE)
verify(node.get('Daemon:Enabled') == 'true')
# Disabling `wpantund` should put the NCP to deep sleep
node.set('Daemon:Enabled', 'false');
verify(node.get('Daemon:Enabled') == 'false')
wpan.verify_within(check_wpan_is_in_deep_sleep_state, WAIT_TIME)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
# Enabling `wpantund` should update the `MCU_POWER_STATE` back to `ON`.
node.set('Daemon:Enabled', 'true');
wpan.verify_within(check_wpan_is_in_offline_state, WAIT_TIME)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify sleep behavior after disabling `wpantund` ("Daemon:Enabled" property) when state is "associated"
node.form("disable-test")
verify(node.is_associated())
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
node.set('Daemon:Enabled', 'false');
verify(node.get('Daemon:Enabled') == 'false')
wpan.verify_within(check_wpan_is_in_deep_sleep_state, WAIT_TIME)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
node.set('Daemon:Enabled', 'true');
wpan.verify_within(check_wpan_is_in_commissioned_state, WAIT_TIME)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_ON)
node.leave()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify `AutoAssociateAfterReset` behavior after reset from "deep-sleep" (but commissioned).
node.set('Daemon:AutoAssociateAfterReset', '1')
node.set(wpan.WPAN_NCP_MCU_POWER_STATE, wpan.MCU_POWER_STATE_LOW_POWER)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
node.form("resume-test")
verify(node.is_associated())
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
node.reset()
# After reset, power state should remain `LOW_POWER` (wpantund would restore the value
# on NCP) and wpantund state should start as "deep-sleep" but since AutoAssociateAfterReset
# is enabled, network should be recovered.
wpan.verify_within(check_wpan_is_in_associating_state, WAIT_TIME)
verify(node.get(wpan.WPAN_NCP_MCU_POWER_STATE) == wpan.MCU_POWER_STATE_LOW_POWER)
#-----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print '\'{}\' passed.'.format(test_name)
|
[
"[email protected]"
] | |
415935edef31996e2b359804e324f5f7b3d48614
|
ab9b75fcdd2b7352968886e5ed41ee7788216226
|
/src/gamesbyexample/stickyhands.py
|
a1af5601756ea83263f3a20e8dd2bb26220102ac
|
[
"MIT"
] |
permissive
|
mgocken/PythonStdioGames
|
d7b48cafbc33a027548cab08ad08aea6c0c81abd
|
036d2f142581fb74a38400721aecce15a695e1bc
|
refs/heads/master
| 2020-09-29T18:35:34.589307 | 2019-12-06T00:15:46 | 2019-12-06T00:15:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,528 |
py
|
# Sticky Hands, by Al Sweigart [email protected]
# A jewel-stealing, movement puzzle game.
__version__ = 1
# Inspired by Herding Cats https://w.itch.io/herding-cats
# TODO - Enter R to reset the entire level.
import copy, os, sys
# Setup the constants:
WALL = chr(9608)
FACE = chr(9786)
DIAMOND = chr(9830)
CHAR_MAP = {'#': WALL, '@': FACE, '$': DIAMOND, ' ': ' '}  # maps level-file characters to on-screen glyphs
# Display the title banner and instructions:
print('''Sticky Hands: A diamond collecting game.
By Al Sweigart [email protected]
Pick up diamonds by standing next to them. Stuck diamonds also
become sticky. Try to stick every diamond in the level.
Enter WASD letters to move, numbers to switch levels, U to undo a
move, or "quit" to quit the game. You can enter multiple WASD or U
letters to make several moves at once.
''')
# Load each level from stickyhandslevels.txt
if not os.path.exists('stickyhandslevels.txt'):
print('Download the level file from https://github.com/asweigart/PythonStdioGames/blob/master/src/stickyhandslevels.txt')
sys.exit()
ALL_LEVELS = []
with open('stickyhandslevels.txt') as levelFile:
currentLevelFromFile = {'width': 0, 'height': 0, 'diamonds': 0} # Each level is represented by a dictionary.
y = 0
for line in levelFile.readlines():
if line.startswith(';'):
continue # Ignore comments in the level file.
if line == '\n':
if currentLevelFromFile == {'width': 0, 'height': 0, 'diamonds': 0}:
continue # Ignore this line, and continue to the next line.
# Finished with the current level:
ALL_LEVELS.append(currentLevelFromFile)
currentLevelFromFile = {'width': 0, 'height': 0, 'diamonds': 0}
y = 0 # Reset y back to 0.
continue
# Add the line to the current level.
# We use line[:-1] so we don't include the newline:
for x, levelChar in enumerate(line[:-1]):
currentLevelFromFile[(x, y)] = levelChar
# Keep track of how many diamonds are in the level:
if levelChar == '$':
currentLevelFromFile['diamonds'] += 1
y += 1
if len(line) - 1 > currentLevelFromFile['width']:
currentLevelFromFile['width'] = len(line) - 1
if y > currentLevelFromFile['height']:
currentLevelFromFile['height'] = y
def drawLevel(levelNum, levelData):
# Draw the current level.
print('Level #' + str(levelNum + 1), 'of', len(ALL_LEVELS))
for y in range(levelData['height']):
for x in range(levelData['width']):
prettyChar = CHAR_MAP[levelData.get((x, y), ' ')]
print(prettyChar, end='')
print()
def getPlayerBlobPoints(levelData, playerx, playery):
playerBlob = [(playerx, playery)]
pointsToCheck = [(playerx, playery)]
alreadyCheckedPoints = []
while len(pointsToCheck) > 0:
x, y = pointsToCheck.pop()
alreadyCheckedPoints.append((x, y))
if (x - 1, y) not in alreadyCheckedPoints and levelData[(x - 1, y)] == '$':
playerBlob.append((x - 1, y))
pointsToCheck.append((x - 1, y))
if (x + 1, y) not in alreadyCheckedPoints and levelData[(x + 1, y)] == '$':
playerBlob.append((x + 1, y))
pointsToCheck.append((x + 1, y))
if (x, y - 1) not in alreadyCheckedPoints and levelData[(x, y - 1)] == '$':
playerBlob.append((x, y - 1))
pointsToCheck.append((x, y - 1))
if (x, y + 1) not in alreadyCheckedPoints and levelData[(x, y + 1)] == '$':
playerBlob.append((x, y + 1))
pointsToCheck.append((x, y + 1))
return playerBlob
currentLevelNumber = 0
currentLevel = copy.copy(ALL_LEVELS[currentLevelNumber])
undoStack = [copy.copy(currentLevel)]
while True: # Main game loop.
drawLevel(currentLevelNumber, currentLevel)
# Get the input from the player:
moves = input('Enter moves> ').upper()
if moves == 'QUIT':
print('Thanks for playing!')
sys.exit()
if moves.isdecimal():
        if not (1 <= int(moves) <= len(ALL_LEVELS)):
            print('Enter a level number between 1 and', len(ALL_LEVELS))
continue
# Change the current level:
currentLevelNumber = int(moves) - 1
currentLevel = copy.copy(ALL_LEVELS[currentLevelNumber])
undoStack = [copy.copy(currentLevel)]
continue
# Validate the input; make sure it only has W, A, S, D, or U:
movesAreValid = True
for move in moves:
if move not in ('W', 'A', 'S', 'D', 'U'):
movesAreValid = False
print(move, 'is not a valid move.')
break
if not movesAreValid:
continue
# Carry out the moves:
for move in moves:
# Find the player position:
for position, character in currentLevel.items():
if character == '@':
playerx, playery = position
if move == 'U':
if len(undoStack) == 1:
continue # Can't undo past the first move.
undoStack.pop() # Remove the last item from the undoStack list.
currentLevel = copy.copy(undoStack[-1])
continue
if move == 'W':
movex, movey = 0, -1
elif move == 'A':
movex, movey = -1, 0
elif move == 'S':
movex, movey = 0, 1
elif move == 'D':
movex, movey = 1, 0
playerBlob = getPlayerBlobPoints(currentLevel, playerx, playery)
blobCanMove = True
for blobPoint in playerBlob:
blobx, bloby = blobPoint[0], blobPoint[1]
moveToSpace = currentLevel.get((blobx + movex, bloby + movey), ' ')
# If the move-to space is a wall, don't move at all:
if moveToSpace == '#':
blobCanMove = False
break
if blobCanMove:
newBlobPoints = []
for blobPoint in playerBlob:
blobx, bloby = blobPoint[0], blobPoint[1]
# If the move-to space is empty or a goal, just move there:
if currentLevel[(blobx, bloby)] == '@':
currentLevel[(blobx, bloby)] = ' '
newBlobPoints.append((blobx + movex, bloby + movey, '@'))
elif currentLevel[(blobx, bloby)] == '$':
currentLevel[(blobx, bloby)] = ' '
newBlobPoints.append((blobx + movex, bloby + movey, '$'))
for newBlobPoint in newBlobPoints:
# Set the player's new position:
currentLevel[(newBlobPoint[0], newBlobPoint[1])] = newBlobPoint[2] # TODO - refactor this.
# Save the state of the level for the undo feature:
undoStack.append(copy.copy(currentLevel))
# Check if the player has finished the level:
levelIsSolved = False
playerBlob = getPlayerBlobPoints(currentLevel, playerx + movex, playery + movey)
if len(playerBlob) - 1 == currentLevel['diamonds']:
levelIsSolved = True
if levelIsSolved:
drawLevel(currentLevelNumber, currentLevel)
print('Level complete!')
input('Press Enter to continue...')
currentLevelNumber = (currentLevelNumber + 1) % len(ALL_LEVELS)
currentLevel = copy.copy(ALL_LEVELS[currentLevelNumber])
undoStack = [copy.copy(currentLevel)]
break # Don't carry out any remaining moves.
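# Level-file format, as inferred from the parser above: '#' is a wall,
# '@' the player, '$' a diamond; lines starting with ';' are comments and
# a blank line ends a level. A hypothetical minimal level:
#
#   ; a tiny two-diamond level
#   ######
#   #@$ $#
#   ######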
|
[
"[email protected]"
] | |
172bb00f20da38f71b5cc4ba3039f8e12d3e9b89
|
bf44592899afb4192579105cc8f3102f8bddef4c
|
/website/login_module/admin.py
|
df30291c8c4e638171e68cd5cea822a1872a46a8
|
[] |
no_license
|
VigneshwarRavichandran/Django-Test
|
7e3021cc27981656a71d21f72d17f1b7955098e9
|
ad11470c5a2e30d802415e6c6236ccac90ff792c
|
refs/heads/master
| 2020-08-01T23:21:41.182811 | 2019-10-09T17:02:06 | 2019-10-09T17:02:06 | 211,155,579 | 2 | 0 | null | 2019-10-09T17:02:25 | 2019-09-26T18:25:47 |
Python
|
UTF-8
|
Python
| false | false | 115 |
py
|
from django.contrib import admin
from .models import *
admin.site.register(Users)
admin.site.register(UserDetails)
|
[
"[email protected]"
] | |
e662722fad68cff102487d6ba08454d41807ad11
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/cv/detection/YOLOX_Dynamic_ID4069_for_PyTorch/yolox/layers/fast_coco_eval_api.py
|
55bfa28a1c06813d48ff90862908a7655239001e
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 6,464 |
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# This file comes from
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/fast_eval_api.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright (c) Megvii Inc. All rights reserved.
import copy
import time
import numpy as np
from pycocotools.cocoeval import COCOeval
from .jit_ops import FastCOCOEvalOp
class COCOeval_opt(COCOeval):
"""
This is a slightly modified version of the original COCO API, where the functions evaluateImg()
and accumulate() are implemented in C++ to speedup evaluation
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.module = FastCOCOEvalOp().load()
def evaluate(self):
"""
Run per image evaluation on given images and store results in self.evalImgs_cpp, a
datastructure that isn't readable from Python but is used by a c++ implementation of
accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure
self.evalImgs because this datastructure is a computational bottleneck.
:return: None
"""
tic = time.time()
print("Running per image evaluation...")
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = "segm" if p.useSegm == 1 else "bbox"
print(
"useSegm (deprecated) is not None. Running {} evaluation".format(
p.iouType
)
)
print("Evaluate annotation type *{}*".format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == "segm" or p.iouType == "bbox":
computeIoU = self.computeIoU
elif p.iouType == "keypoints":
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds
}
maxDet = p.maxDets[-1]
# <<<< Beginning of code differences with original COCO API
def convert_instances_to_cpp(instances, is_det=False):
# Convert annotations for a list of instances in an image to a format that's fast
# to access in C++
instances_cpp = []
for instance in instances:
instance_cpp = self.module.InstanceAnnotation(
int(instance["id"]),
instance["score"] if is_det else instance.get("score", 0.0),
instance["area"],
bool(instance.get("iscrowd", 0)),
bool(instance.get("ignore", 0)),
)
instances_cpp.append(instance_cpp)
return instances_cpp
# Convert GT annotations, detections, and IOUs to a format that's fast to access in C++
ground_truth_instances = [
[convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]
for imgId in p.imgIds
]
detected_instances = [
[
convert_instances_to_cpp(self._dts[imgId, catId], is_det=True)
for catId in p.catIds
]
for imgId in p.imgIds
]
ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]
if not p.useCats:
# For each image, flatten per-category lists into a single list
ground_truth_instances = [
[[o for c in i for o in c]] for i in ground_truth_instances
]
detected_instances = [
[[o for c in i for o in c]] for i in detected_instances
]
# Call C++ implementation of self.evaluateImgs()
self._evalImgs_cpp = self.module.COCOevalEvaluateImages(
p.areaRng,
maxDet,
p.iouThrs,
ious,
ground_truth_instances,
detected_instances,
)
self._evalImgs = None
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
# >>>> End of code differences with original COCO API
def accumulate(self):
"""
Accumulate per image evaluation results and store the result in self.eval. Does not
support changing parameter settings from those used by self.evaluate()
"""
print("Accumulating evaluation results...")
tic = time.time()
if not hasattr(self, "_evalImgs_cpp"):
print("Please run evaluate() first")
self.eval = self.module.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)
# recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
self.eval["recall"] = np.array(self.eval["recall"]).reshape(
self.eval["counts"][:1] + self.eval["counts"][2:]
)
# precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
# num_area_ranges X num_max_detections
self.eval["precision"] = np.array(self.eval["precision"]).reshape(
self.eval["counts"]
)
self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
toc = time.time()
print(
"COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic)
)
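# A minimal drop-in usage sketch (hypothetical annotation/detection file
# names; COCOeval_opt keeps the pycocotools COCOeval interface, so
# summarize() comes unchanged from the base class):
#
#   from pycocotools.coco import COCO
#   coco_gt = COCO("instances_val2017.json")
#   coco_dt = coco_gt.loadRes("detections.json")
#   coco_eval = COCOeval_opt(coco_gt, coco_dt, iouType="bbox")
#   coco_eval.evaluate()
#   coco_eval.accumulate()
#   coco_eval.summarize()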
|
[
"[email protected]"
] | |
abc9188983419180bd10fbed0de5aaa7610ece80
|
0ec9b1f5435854638befc7efadca0577cc9c6505
|
/mlsemcwk/viewdata.py
|
0fe474e8dcaca77150e2b51eac3728cfa4d4ce4d
|
[] |
no_license
|
michaelwh/mlsemcwk
|
f4ec19ab417b6cc2c216d68835f35b0790d484ec
|
5f59e8c50c56d53c0de84dd16d5d49c292e92a39
|
refs/heads/master
| 2021-01-20T03:26:02.560111 | 2011-05-18T14:00:40 | 2011-05-18T14:00:40 | 1,752,018 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,857 |
py
|
#!/usr/bin/env python
# Tk-matplotlib integration code from http://matplotlib.sourceforge.net/examples/user_interfaces/embedding_in_tk2.html
import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib import pyplot
import Tkinter as Tk
import sys
import PyML as pyml
import semdatautil
def destroy(e): sys.exit()
class MatplotlibTkFigFrame(object):
def __init__(self, fig):
self.fig = fig
## ------------------------- TK STUFF
self.root = Tk.Tk()
self.root.wm_title("MatplotlibTkFigFrame")
#root.bind("<Destroy>", destroy)
# a tk.DrawingArea
self.canvas = FigureCanvasTkAgg(fig, master=self.root)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
#toolbar = NavigationToolbar2TkAgg( canvas, root )
#toolbar.update()
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
## ^^^^^^^^^^^^^^^^^^^^^^^^^^ TK STUFF
def run_tk_mainloop(self):
Tk.mainloop()
class DataViewer(MatplotlibTkFigFrame):
def __init__(self, fig, ax, datarows, labels=None, startno=0):
super(DataViewer, self).__init__(fig)
self.currdatano = 0
self.fig = fig
self.ax = ax
self.datarows = datarows
self.labels = labels
self.quitbutton = Tk.Button(master=self.root, text='Quit', command=sys.exit)
self.quitbutton.pack(side=Tk.BOTTOM)
self.nextbutton = Tk.Button(master=self.root, text='>', command=self.next_data)
self.nextbutton.pack(side=Tk.BOTTOM)
self.prevbutton = Tk.Button(master=self.root, text='<', command=self.prev_data)
self.prevbutton.pack(side=Tk.BOTTOM)
self.show_data(startno)
super(DataViewer, self).run_tk_mainloop()
def show_data(self, datano):
self.currdatano = datano
self.ax.imshow(semdatautil.sem_datarow_to_image(self.datarows[datano]), cmap=pyplot.gray())
self.fig.canvas.draw()
print self.currdatano
if self.labels != None:
print "Label: " + str(self.labels[self.currdatano])
def next_data(self):
self.show_data(self.currdatano + 1)
def prev_data(self):
if self.currdatano > 0:
self.show_data(self.currdatano - 1)
if __name__ == '__main__':
fig = Figure()#figsize=(10,10))#, dpi=100)
ax = fig.add_subplot(111)
ax.set_title('Data')
datarows, labels = semdatautil.get_sem_data('semeion.data')
print labels
print datarows[0]
dataviewer = DataViewer(fig, ax, datarows, labels=labels)
|
[
"[email protected]"
] | |
44a2c07bd6f7ec678545c4d5e02cbf32aa98c42d
|
58575ac7ea2aa81f0267a2badd8b40b06b37f8f4
|
/production.py
|
37b6508cfa8cfdd0248b0a9e13bfcf64ca8ee963
|
[] |
no_license
|
wenbinhuang9/LL-first-follow
|
60c02172af53ff56667b3b210709ca61dc4dd5ab
|
aeafde3f8b75654e65fd82fec17af8dd76026267
|
refs/heads/master
| 2021-05-20T16:31:31.037819 | 2020-04-03T01:00:33 | 2020-04-03T01:00:33 | 252,368,991 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,149 |
py
|
def decodeProductionList(file):
ans = []
start = None
with open(file) as fd:
lines = fd.readlines()
for line in lines:
if line.startswith("start"):
start = line.split()[1]
elif line != "":
production = decodeProduction(line)
ans.append(production)
if start == None:
return ans
else:
return (start, ans)
def decodeProduction(line):
production_rule = line.split("->")
    left, right_rule = production_rule[0].strip(), production_rule[1]
production = Production().left(left)
rights = right_rule.split("|")
production.right([right.strip() for right in rights])
return production
class Production():
def __init__(self):
self.nonterminal = None
self.rightList = []
def left(self, nonterminal):
self.nonterminal = nonterminal
return self
def right(self, rightDerivations):
if isinstance(rightDerivations, list):
self.rightList.extend(rightDerivations)
else:
self.rightList.append(rightDerivations)
return self
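# Worked example (hypothetical grammar line):
#
#   p = decodeProduction("S -> a B | b")
#   p.nonterminal   ->  "S"
#   p.rightList     ->  ["a B", "b"]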
|
[
"[email protected]"
] | |
6747247f5f548f5e24f7c1afa2b192c7d9a0e06e
|
e8600b0404bf67f7046b81a7afde513dbbaad622
|
/recipe-search/urls.py
|
15ee99859e042a65236e7b4d5c134f11677a8534
|
[
"BSD-3-Clause"
] |
permissive
|
talpor/recipe-search-hackathon
|
f46817041b2310f3b08c3e163fd8bab4e56df1be
|
7471f08e1067317f3bb1f35b4ebd4b3cd96e0580
|
refs/heads/master
| 2016-09-11T00:21:45.429716 | 2015-02-23T02:15:42 | 2015-02-23T02:15:42 | 31,127,311 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 679 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^', include('recipe.urls'), name="home"),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("users.urls", namespace="users")),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
0318a8bc4860d8f8c1d1970f31b869a66bf7252a
|
aa61aca81cbe97ec6a012ddd55f5780df98f952a
|
/Final_task/libraries/UnixToIso.py
|
267a612b6580d45f8400c020196c53858ad88136
|
[] |
no_license
|
R1ckNash/Testing_the_openweathermap_website
|
3141870e774fb39d908c98a825af92f3aefde0d5
|
6d86b16e1313cc7aa9a769669ed06affacb10b8b
|
refs/heads/master
| 2022-10-13T19:17:56.607743 | 2022-09-14T07:44:53 | 2022-09-14T07:44:53 | 260,710,508 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,939 |
py
|
seconds = 86400  # seconds in one day
def get_year(unixtime):
year = 365
leap_year = 366
current_year = 1970
cur_unix = 0
tmp_unix = 0
while 1:
if cur_unix > unixtime:
current_year -= 1
break
if (current_year % 4 == 0 and current_year % 100 != 0) or current_year % 400 == 0:
tmp_unix = cur_unix
cur_unix += leap_year * seconds
else:
tmp_unix = cur_unix
cur_unix += year * seconds
current_year += 1
return current_year, unixtime - tmp_unix
def get_month(current_year, unix):
mas_year_default = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
mas_year_leap = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
tmp_unix = 0
tmp_unix_default = 0
tmp_unix_leap = 0
cur_month = 1
for i in range(0, len(mas_year_leap)):
if (current_year % 4 == 0 and current_year % 100 != 0) or current_year % 400 == 0:
tmp_unix_leap = tmp_unix
tmp_unix += mas_year_leap[i] * seconds
else:
tmp_unix_default = tmp_unix
tmp_unix += mas_year_default[i] * seconds
cur_month += 1
if tmp_unix == unix:
break
if tmp_unix > unix:
cur_month -= 1
break
if (current_year % 4 == 0 and current_year % 100 != 0) or current_year % 400 == 0:
result = unix - tmp_unix_leap
else:
result = unix - tmp_unix_default
if tmp_unix == unix:
result = 0
return cur_month, result
def get_day(unix):
tmp_unix = 0
tmp_unix_cur = 0
cur_day = 1
while 1:
if tmp_unix_cur > unix:
cur_day -= 1
break
tmp_unix = tmp_unix_cur
tmp_unix_cur += seconds
cur_day += 1
return cur_day, unix - tmp_unix
def get_hour(unix):
cur_hour = 0
tmp_unix = 0
tmp_unix_cur = 0
sec_in_hour = 3600
while 1:
if tmp_unix_cur > unix:
cur_hour -= 1
break
tmp_unix = tmp_unix_cur
tmp_unix_cur += sec_in_hour
cur_hour += 1
return cur_hour, unix - tmp_unix
def get_min_and_sec(unix):
tmp_unix = 0
tmp_unix_cur = 0
sec_in_min = 60
cur_min = 0
while 1:
if tmp_unix_cur > unix:
cur_min -= 1
break
tmp_unix = tmp_unix_cur
tmp_unix_cur += sec_in_min
cur_min += 1
return cur_min, unix - tmp_unix
unix_time = 1493117112
s1 = get_year(unix_time)
year = s1[0]
s2 = get_month(s1[0], s1[1])
month = s2[0]
s3 = get_day(s2[1])
day = s3[0]
s4 = get_hour(s3[1])
hour = s4[0]
s5 = get_min_and_sec(s4[1])
minute = s5[0]
second = s5[1]
time_iso = '{}-{:02}-{:02}T{:02}:{:02}:{:02}+03:00'.format(year, month, day, hour, minute, second)
print(time_iso)
# 1587148255 2020-04-17T18:30:55+03:00
# 1109901663 2005-03-04T02:01:03+03:00
# 1493117112 2017-04-25T10:45:12+03:00
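# Cross-check sketch using the standard library (assumes the fixed UTC+3
# offset hard-coded in the format string above):
#
#   from datetime import datetime, timedelta, timezone
#   tz = timezone(timedelta(hours=3))
#   datetime.fromtimestamp(1493117112, tz).isoformat()
#   # -> '2017-04-25T10:45:12+03:00'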
|
[
"[email protected]"
] | |
fe484f2dbfa7363e12c93e00a34759692e113a73
|
f4b8c90c1349c8740c1805f7b6b0e15eb5db7f41
|
/test/test_term_session_item.py
|
7867f29a7aa4a6fd2bb993565b40f161db7abf86
|
[] |
no_license
|
CalPolyResDev/StarRezAPI
|
012fb8351159f96a81352d6c7bfa36cd2d7df13c
|
b184e1863c37ff4fcf7a05509ad8ea8ba825b367
|
refs/heads/master
| 2021-01-25T10:29:37.966602 | 2018-03-15T01:01:35 | 2018-03-15T01:01:35 | 123,355,501 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,044 |
py
|
# coding: utf-8
"""
StarRez API
This is a way to connect with the StarRez API. We are not the developers of the StarRez API, we are just an organization that uses it and wanted a better way to connect to it. # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import starrez_client
from starrez_client.models.term_session_item import TermSessionItem # noqa: E501
from starrez_client.rest import ApiException
class TestTermSessionItem(unittest.TestCase):
"""TermSessionItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTermSessionItem(self):
"""Test TermSessionItem"""
# FIXME: construct object with mandatory attributes with example values
# model = starrez_client.models.term_session_item.TermSessionItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
eda4b59a482a804a1127f88a7858183bc9a63ff5
|
6b1852a7b33cb5dd8f8c77af4b02e6ef2cd33bb2
|
/process_data.py
|
292f75e400526963235124c85f96d5fc6711f1f5
|
[] |
no_license
|
LianaMikael/Paraphrase-Generation
|
16cd6b7d0208cb9ae674dcede15c65e859b8eb9b
|
612e94a167b84b57002c1561473802046e491b14
|
refs/heads/master
| 2023-02-08T17:34:53.595361 | 2021-01-03T22:01:14 | 2021-01-03T22:01:14 | 290,590,604 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,235 |
py
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import re
def collect_ppdb():
sources = []
targets = []
with open('ppdb_all.txt', 'r+') as f:
for line in f:
line = line.split('|||')
if float(line[-1]) >= 3.0:
sources.append(line[0])
targets.append(line[1])
return sources, targets
def collect_quora():
sources = []
targets = []
data = pd.read_csv('quora_duplicate_questions.tsv', sep="\t")
data = np.array(data)
data = data[data[:,-1]==1] # only collect true paraphrases
for row in data:
sources.append(row[-3])
targets.append(row[-2])
return sources, targets
def collect_language_net():
sources = []
targets = []
with open('2016_Oct_10--2017_Jan_08_paraphrase.txt', 'r+') as f:
for line in f:
line = line.split('\t')
if len(line) == 2:
sources.append(line[0].strip())
targets.append(line[1].strip())
return sources, targets
def save_to_file(out_file, sources, targets):
for i in range(len(sources)):
source_string = re.sub(r'\W+ ', '', sources[i])
target_string = re.sub(r'\W+ ', '', targets[i])
out_file.write('{},{}\n'.format(source_string, target_string))
out_file.close()
if __name__ == '__main__':
out_f_train = open('train_data_all.csv', 'w+')
out_f_val = open('val_data_all.csv', 'w+')
out_f_test = open('test_data_all.csv', 'w+')
ppdb_sources, ppdb_targets = collect_ppdb()
quora_sources, quora_targets = collect_quora()
ln_sources, ln_targets = collect_language_net()
all_data = list(zip(ppdb_sources + quora_sources + ln_sources, ppdb_targets + quora_targets + ln_targets))
source_train, source_val, target_train, target_val = train_test_split([x[0] for x in all_data], [x[1] for x in all_data], test_size=0.05)
source_val, source_test, target_val, target_test = train_test_split(source_val, target_val, test_size=0.2)
save_to_file(out_f_train, source_train, target_train)
save_to_file(out_f_val, source_val, target_val)
save_to_file(out_f_test, source_test, target_test)
|
[
"[email protected]"
] | |
10e0325e5b08eac3a5535c61283db715fcf39bb1
|
47ea61cf24fb864c6bea0c6ef47436625b25105e
|
/project_6.py
|
f16a3dd78c12faa584d252146ac28eefa55e35d5
|
[] |
no_license
|
eun77/Voronoi-Algorithm-Center-points
|
9973cde9ca19ae5689ce9757d6563045d77b1132
|
5104ef17fafcca12e320774ff47ebc2f1769796d
|
refs/heads/master
| 2020-06-21T18:13:03.699186 | 2019-07-18T06:32:58 | 2019-07-18T06:32:58 | 197,523,347 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,531 |
py
|
from PIL import Image
import random
import math
def generate_voronoi_diagram(width, height):
image = Image.new("RGB", (width, height))
putpixel = image.putpixel
imgx, imgy = image.size
nx = []
ny = []
nr = []
ng = []
nb = []
nx = [20, 40, 40, 40, 40, 60, 60, 60, 60, 80, 80, 120, 120, 140, 140, 140, 140, 160, 160, 160, 160, 180]
ny = [50, 20, 40, 60, 80, 20, 40, 60, 80, 40, 60, 40, 60, 20, 40, 60, 80, 20, 40, 60, 80, 50]
nr = [215, 243, 230, 222, 200, 234, 210, 213, 213, 210, 213, 213, 214, 234, 234, 215, 243, 230, 222, 200, 234, 210, 213, 213, 210, 213, 213, 214, 234, 215]
ng = [225, 233, 230, 200, 206, 213, 223, 245, 210, 213, 214, 214, 215, 234, 212, 225, 233, 230, 200, 206, 213, 223, 245, 210, 213, 214, 214, 215, 234, 225]
nb = [215, 243, 210, 200, 100, 220, 235, 245, 210, 231, 234, 231, 234, 253, 213, 215, 243, 210, 200, 100, 220, 235, 245, 210, 231, 234, 231, 234, 253, 215]
num_cells = len(nx)
print(nx,"\n",ny)
print(imgx, imgy)
for y in range(imgy):
for x in range(imgx):
dmin = math.hypot(imgx, imgy)
j = -1
for i in range(num_cells):
d = math.hypot(nx[i]-x, ny[i]-y)
if d < dmin:
dmin = d
j = i
putpixel((x, y), (nr[j], ng[j], nb[j]))
for i in range(22):
image.putpixel((nx[i],ny[i]), (0,0,0))
image.save("VoronoiDiagram.png", "PNG")
image.show()
if __name__== "__main__":
generate_voronoi_diagram(200, 100)
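# Note: each pixel is coloured after its nearest seed point (smallest
# Euclidean distance, math.hypot above), i.e. the brute-force
# O(width * height * num_cells) Voronoi construction.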
|
[
"[email protected]"
] | |
ce6a75c3e25f17454a75327f11652b8aa4e4197d
|
4c68051b3ade462240f063a6dbdea690bcc1457f
|
/gsb_intention.py
|
99cc34ef91b70f8ebb0afc9bd0b1e687a8d544fb
|
[] |
no_license
|
gaohang/search
|
42cfac8318b2465f3b2df42de46c5ac05b921ca8
|
81459a5f43bf308ea6f5e77afb87c05d4113a7a0
|
refs/heads/master
| 2023-07-12T06:37:20.895644 | 2021-08-27T07:46:56 | 2021-08-27T07:46:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,778 |
py
|
# encoding=utf-8
import pandas as pd
import json
from query_parser_search import QueryParserSearch
from main_search_search import MainSearchSearch
from config import p_date_today
import os
if __name__ == '__main__':
cwd, filename = os.path.split(os.path.realpath(__file__))
print(cwd)
gsb = open(cwd+'/gsb_results/gsb_intention_qp_v7_1.csv', 'w', encoding='utf-8')
size = 20
base = MainSearchSearch(['main_search_online'])
new = base
base_query_parser = QueryParserSearch(['query_parser_v6_online'])
# new_query_parser = QueryParserSearch(['query_parser_v7_online'])
new_query_parser_1 = QueryParserSearch(['query_parser_v7_1_online'])
base_search_template_id = 'main_search_search_v7'
new_search_template_id = base_search_template_id
gsb.write('GSB: base VS new_0\n\ngood:1,\tsame:2,\tbad:3\n')
path_sample = cwd+"/data/sample_keywords.json"
total, n_same, n_equal_len, n_top_same = 0, 0, 0, 0
with open(path_sample, "r", encoding='utf-8') as f:
for idx,l in enumerate(f.readlines()):
total += 1
r = json.loads(l)
base_params = {
"keyword": r['keyword'],
"area_id": 101,
"sale_date": p_date_today,
"show_time": p_date_today + " 10:00:00",
"from": 0,
"size": size
}
base_result = base.search(base_params, template_id=base_search_template_id, query_parser=base_query_parser)
# new_result_0 = new.search(base_params, template_id=new_search_template_id, query_parser=new_query_parser)
new_result_0 = new.search(base_params, template_id=new_search_template_id, query_parser=new_query_parser_1)
gsb.write(
r['keyword'] + '\t[base]\t' + "|correct:" + base_result.get('correct', '') + '|core_words:' + base_result.get('core_words', '') +
'\t|cat_l1: ' + str(base_result.get('cat_l1', '')) + '\t|cat_l2: ' + str(
base_result.get('cat_l2', '')) + '\n')
gsb.write(
r['keyword'] + '\t[new_0]\t' + "|correct:" + new_result_0.get('correct', '') + '|core_words:' + new_result_0.get('core_words', '') +
'\t|cat_l1: ' + str(new_result_0.get('cat_l1', '')) + '\t|cat_l2: ' + str(
new_result_0.get('cat_l2', '')) + '\n')
line = ['' for i in range(size)]
n_base, n_new0 = len(base_result['docs']), len(new_result_0['docs'])
if n_base==n_new0:
n_equal_len += 1
gsb.write("<len equal>\n")
else:
gsb.write("<len diff>\n")
if n_base==0 and n_new0==0:
n_same += 1
gsb.write("<gsb same>empty results\n")
continue
max_line = max(len(base_result['docs']), len(new_result_0['docs']), 1)
same = 0
for i in range(max_line):
line[i] += '\t' + str(i + 1) + '\n'
if i < min(len(base_result['docs']), len(new_result_0['docs'])):
if new_result_0['docs'][i]['product_name'] == base_result['docs'][i]['product_name'] :
tags = ','.join(base_result['docs'][i]['tags'].split(" ")) if 'tags' in base_result['docs'][i].keys() else ""
line[i] += "\t[base=new]" + base_result['docs'][i]['product_name'] + '\t|cat_l1: ' + str(
base_result['docs'][i]['cat_l1'][0]) + '\t|cat_l2: ' + str(
base_result['docs'][i]['cat_l2'][0]) + '\t|tag: ' + tags + '\t|score:' + str(
base_result['docs'][i]['score'])
line[i] += '\n'
same += 1
continue
if base_result['total_docs'] >= i + 1:
tags = ','.join(base_result['docs'][i]['tags'].split(" ")) if 'tags' in base_result['docs'][i].keys() else ""
line[i] += "\t[base] " + base_result['docs'][i]['product_name'] + '\t|cat_l1: ' + str(
base_result['docs'][i]['cat_l1'][0]) + '\t|cat_l2: ' + str(
base_result['docs'][i]['cat_l2'][0]) + '\t|tag: ' + tags + '\t|score:' + str(
base_result['docs'][i]['score'])
else:
line[i] += "\t[base]"
line[i] += '\n'
if new_result_0['total_docs'] >= i + 1:
tags = ','.join(new_result_0['docs'][i]['tags'].split(" ")) if 'tags' in new_result_0['docs'][i].keys() else ""
line[i] += "\t[new_0] " + new_result_0['docs'][i]['product_name'] + '\t|cat_l1: ' + str(
new_result_0['docs'][i]['cat_l1'][0]) + ' \t|cat_l2: ' + str(
new_result_0['docs'][i]['cat_l2'][0]) + '\t|tag: ' + tags + '\t|score:' + str(
new_result_0['docs'][i]['score'])
else:
line[i] += "\t[new_0]"
line[i] += '\n\n'
if same >= max(len(base_result['docs']), len(new_result_0['docs'])):
gsb.write('<gsb same>\n\n')
n_same += 1
n_top_same += 1
continue
elif same>3:
gsb.write('<gsb top same>\n\n')
n_top_same += 1
for i in range(max_line):
gsb.write(line[i])
else:
gsb.write('<gsb diff>\n')
for i in range(max_line):
gsb.write(line[i])
gsb.close()
summary = "Total:{}, n_same:{}, n_top_same:{}, n_equal_len:{}".format(total, n_same, n_top_same, n_equal_len)
print(summary)
|
[
"[email protected]"
] | |
bab3b9c06cd3458d4a355e11325a5729132b1ce1
|
eea1006480b1de285a102cc4223db193133baa16
|
/hw_3/hw03_normal.py
|
7159aca10d933db39a7b8386004ce10a39cfc70a
|
[] |
no_license
|
kostcher/Python
|
de5e0515f41c1fe957ab14370054e41e7309873c
|
7fb0e22254a41ccf79b9f14bfbee088e0bcaf01a
|
refs/heads/master
| 2020-03-30T14:37:06.395892 | 2018-12-19T19:56:39 | 2018-12-19T19:56:39 | 151,326,877 | 0 | 0 | null | 2018-12-19T19:56:49 | 2018-10-02T21:38:37 |
Python
|
UTF-8
|
Python
| false | false | 2,470 |
py
|
# Task 1:
# Write a function that returns the Fibonacci series from the n-th to the m-th element.
# The first two elements of the series are the digits 1 1
def fibonacci(n, m):
if n <= 0 or m <= 0 or m < n:
return []
fibonacci_row = [1, 1]
for i in range(1, m - 1):
fibonacci_row.append(
fibonacci_row[i] + fibonacci_row[i - 1]
)
return fibonacci_row[n - 1:]
print(fibonacci(7, 10))
# Task 2:
# Write a function that sorts the given list in ascending order.
# Use any algorithm for the sorting (for example, bubble sort).
# The built-in sort() function/method must not be used for this task.
my_list = [8, 3, 6, 1, 0]
def bubble_sort(my_list):
for i in range(0, len(my_list)):
for j in range(i + 1, len(my_list)):
if my_list[i] > my_list[j]:
                my_list[i], my_list[j] = my_list[j], my_list[i]  # idiomatic tuple swap
return my_list
print(bubble_sort(my_list))
# Task 3:
# Write your own implementation of the standard filter function.
# Naturally, the filter function itself must not be used inside.
def is_even(number):
if number % 2 == 0:
return True
return False
my_list = [1, 2, 2, 5, 6, 8, 11]
def my_filter(func, iterated_obj):
    # Build a new list instead of calling remove() while iterating,
    # which would skip the element that follows each removed one.
    result = []
    for value in iterated_obj:
        if func(value):
            result.append(value)
    return result
print(my_filter(is_even, my_list))
# Task 4:
# Given four points A1(x1, y1), A2(x2, y2), A3(x3, y3), A4(x4, y4).
# Determine whether they are the vertices of a parallelogram.
a, b, c, d = (1, 3), (4, 7), (2, 8), (-1, 4)
def is_vertex_parallelogram(a, b, c, d):
import math
ab = math.sqrt((b[0] - a[0])**2 + (b[1] - a[1])**2)
dc = math.sqrt((c[0] - d[0])**2 + (c[1] - d[1])**2)
ad = math.sqrt((d[0] - a[0])**2 + (d[1] - a[1])**2)
bc = math.sqrt((c[0] - b[0])**2 + (c[1] - b[1])**2)
if ab == dc and ad == bc:
return True
return False
print(is_vertex_parallelogram(a, b, c, d))
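# The check above computes each side with the Euclidean distance formula
# d = sqrt((x2 - x1)**2 + (y2 - y1)**2) and accepts the points (taken in
# the order A, B, C, D) when both pairs of opposite sides are equal:
# |AB| == |DC| and |AD| == |BC|.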
|
[
"[email protected]"
] | |
4c340cb45e7aef300a6fcf35ac7fb96f061e341f
|
d6b3399b6c87137f97a3ea03e77cf00ef043144b
|
/titanium-reset-appc.sh
|
540b1ad41bf4427793f8b25296094b616e2e520d
|
[] |
no_license
|
deanrock/ios-continuous-integration
|
9301f1fd82544f08dd1a3e3923789aadb94639ca
|
77f3a1a8962f355e82411cb4e28e42f6196026dd
|
refs/heads/master
| 2020-12-24T07:01:19.481134 | 2017-01-16T22:43:52 | 2017-01-16T22:43:52 | 58,654,479 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 491 |
sh
|
#!/usr/bin/python
import uuid
import sys
path = sys.argv[1]
new = []
with open(path) as f:
data = f.readlines()
for line in data:
if line.strip().startswith('<property name="appc-'):
continue
if line.strip().startswith('<property name="acs-'):
continue
if line.strip().startswith('<guid>'):
line = ' <guid>%s</guid>\n' % (str(uuid.uuid1()))
new.append(line)
with open(path, 'w') as f:
f.writelines(new)
|
[
"[email protected]"
] | |
05a6fb23df52b0619efa62fd7632a2128972f524
|
d4ae9cbb7438397a33d7ac6754716db2f4fabd6c
|
/HelloPython/study/HelloPython111.py
|
36da8e90c92ef59ec2593ce5a85b29690ef8caf1
|
[
"Apache-2.0"
] |
permissive
|
treason258/TreLibrary
|
82cf0f3d7c1a67a18c86cf3c8d6566507db2ca79
|
ccf463763518e40696a2cb7e7b95d6a7644fcf6b
|
refs/heads/master
| 2023-01-08T10:25:33.097219 | 2021-10-15T06:06:10 | 2021-10-15T06:06:10 | 64,649,980 | 0 | 0 |
Apache-2.0
| 2023-01-03T15:40:27 | 2016-08-01T08:31:08 |
Java
|
UTF-8
|
Python
| false | false | 2,757 |
py
|
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
import os
import time
class HelloSpider(object):
num = 0;
dirStr = 'Downloads/python/HelloSpider44'
imgStr = 'Downloads/python/HelloSpider44/0.jpg'
# print urllib2.urlopen("https://img.q6pk.com/image/20181119/", context=context).read()
def __init__(self):
pass
def getImagePageRange(self, fromPage, toPage):
mkdirStr = 'mkdir ' + HelloSpider.dirStr;
print mkdirStr
        os.system(mkdirStr)  # create the directory for saving images
        # mkImgStr = 'touch file ' + HelloSpider.imgStr;
        # print mkImgStr
        # os.system(mkImgStr)  # create the file for saving images
i = int(fromPage)
while i <= int(toPage):
# url = "http://www.dbmeinv.com/?pager_offset=" + str(i)
# url = "https://www.dbmeinv.com/index.htm?pager_offset=" + str(i)
url = "https://www.dbmeinv.com/index.htm?cid=4&pager_offset=" + str(i)
print url
print "\n第%d页" % i
self.getImageFormUrl(url)
i += 1
def getImageFormUrl(self, url):
headers = {"User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/1 7.0.963.56 Safari/535.11"}
request = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(request)
text = response.read()
# print text
p1 = r"(?<=\(this\);\" src=\").+?\.jpg(?=\" />)"
pattern = re.compile(p1)
imgs = pattern.findall(text)
print imgs
for img in imgs:
imageName = HelloSpider.dirStr + ("/%d.jpg" % (HelloSpider.num))
imageUrl = img
if HelloSpider.num == 3:
imageUrl = 'https://img.q6pk.com/image/20181119/0ba051e0b7747bc8cce970b81cfa0584_938_1370.jpg'
print imageUrl
self.saveImage(imageUrl, imageName)
HelloSpider.num += 1
def saveImage(self, imageUrl, imageName):
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
headers = {"User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/1 7.0.963.56 Safari/535.11"}
request = urllib2.Request(imageUrl, headers = headers)
imageData = urllib2.urlopen(request).read()
with open(imageName, "wb") as f:
f.write(imageData)
print 'Saving image:', imageName
time.sleep(0.1)
helloSpider = HelloSpider()
# fromPage = raw_input("输入开始页:")
# toPage = raw_input("输入结束页:")
# helloSpider.getImagePageRange(fromPage, toPage)
helloSpider.getImagePageRange(11, 11)
|
[
"[email protected]"
] | |
581d49c1b52ad71809a42f76fa48e98a89cd7a70
|
f40ba0cc14c227320636bb26ff4c18c433bbd796
|
/oxemHeroes/gameMember/apps.py
|
df3241a5745a28148dbd19f7e53b29d19cfdcd65
|
[] |
no_license
|
mLegeay/Oxem-heroes
|
432bf717a0ea96174ef838318f29fea42e500f4a
|
5dbfd0c1bbd1f92f4678ef55acbef718db4d8f84
|
refs/heads/master
| 2020-04-25T05:03:16.659607 | 2019-03-21T13:44:18 | 2019-03-21T13:44:18 | 172,530,179 | 0 | 0 | null | 2019-03-21T13:44:19 | 2019-02-25T15:17:44 |
Python
|
UTF-8
|
Python
| false | false | 95 |
py
|
from django.apps import AppConfig
class GamememberConfig(AppConfig):
name = 'gameMember'
|
[
"[email protected]"
] | |
02e1b1ac9d7ca0fcf0fa59318c57df5d46403f9d
|
16809bf25066488f2f32f154dadef3e30c68ae68
|
/sine_wave.py
|
0ed35aeb1f8d136868fdb4c3053a10605cc1bcdf
|
[] |
no_license
|
aidiary/signal_processing
|
0db6d1a9662ccd0fe232ccc461e9b27174c8ef88
|
4c1cb8ceee3a1527f38b8dbf9ffa1a737d06b577
|
refs/heads/master
| 2021-01-13T03:44:32.721301 | 2016-12-23T13:40:10 | 2016-12-23T13:40:10 | 77,221,395 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,762 |
py
|
#coding: utf-8
import wave
import struct
import numpy as np
from pylab import *
def createSineWave (A, f0, fs, length):
"""振幅A、基本周波数f0、サンプリング周波数 fs、
長さlength秒の正弦波を作成して返す"""
data = []
# build a wave of float values in [-1.0, 1.0]
for n in arange(length * fs): # n is the sample index
s = A * np.sin(2 * np.pi * f0 * n / fs)
# clip when the amplitude is too large
if s > 1.0: s = 1.0
if s < -1.0: s = -1.0
data.append(s)
# convert to integer values in [-32768, 32767]
data = [int(x * 32767.0) for x in data]
# plot(data[0:100]); show()
# convert to binary
data = struct.pack("h" * len(data), *data) # the * unpacks the list into arguments
return data
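# Worked check: with f0 = 250 Hz and fs = 8000 Hz (the __main__ values below),
# one period spans fs / f0 = 32 samples, so data[0:100] covers about 3 periods.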
def play (data, fs, bit):
import pyaudio
# open a stream
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
channels=1,
rate=int(fs),
output= True)
# write to the stream chunk by chunk to play the audio
chunk = 1024
sp = 0 # playback position pointer
buffer = data[sp:sp+chunk]
while buffer != '':
stream.write(buffer)
sp = sp + chunk
buffer = data[sp:sp+chunk]
stream.close()
p.terminate()
def save(data, fs, bit, filename):
"""波形データをWAVEファイルへ出力"""
wf = wave.open(filename, "w")
wf.setnchannels(1)
wf.setsampwidth(bit / 8)
wf.setframerate(fs)
wf.writeframes(data)
wf.close()
if __name__ == "__main__" :
data = createSineWave(0.25, 250, 8000.0, 1.0)
play(data, 8000, 16)
save(data, 8000, 16, "sine.wav")
|
[
"[email protected]"
] | |
1d780241475488e4db572beaa12329618d3bf1a4
|
d9a1ab1a63b5e32917f8bf0235f2a3d5dda0859e
|
/6.5.3.py
|
01839d6d67fa29a1ceb8dc82b0510750f69cbeb7
|
[] |
no_license
|
ianmlunaq/edhesive-python
|
a2123e0dd814ea82daa2b6d4c365bafc718e6faa
|
25def8fa2728e7e388866dc7451b0a523adea291
|
refs/heads/main
| 2023-04-01T14:08:32.362019 | 2021-04-06T03:53:43 | 2021-04-06T03:53:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 92 |
py
|
# 6.5.3.py | ian luna | 2020.04.24
z = 0
for x in range(99, 0, -1):
z = z+x
print(z)
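# Worked check: the loop accumulates 99 + 98 + ... + 1 = 99 * 100 / 2 = 4950,
# printing the running total on each iteration.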
|
[
"[email protected]"
] | |
0927f40911c491e95f265c70fcf2b07e8abf0276
|
0da6461f39f243e1e2b6eef346da29e3f13ec198
|
/guest/sign/migrations/0001_initial.py
|
6c6c647f3f68a2b483ee6da898545d0d7374024b
|
[] |
no_license
|
galypso/pydj
|
36865f5b86db7a4be33bb8402e44051a46685cf4
|
c9b5b26ac61d2c1d48ff25e3e3240572a28271f4
|
refs/heads/master
| 2020-06-01T16:08:07.358258 | 2017-06-13T05:39:47 | 2017-06-13T05:39:47 | 94,079,377 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,662 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-06-12 05:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('limit', models.IntegerField()),
('status', models.BooleanField()),
('address', models.CharField(max_length=200)),
('start_time', models.DateTimeField(verbose_name='event time')),
('create_time', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Guest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('realname', models.CharField(max_length=64)),
('phone', models.CharField(max_length=16)),
('email', models.EmailField(max_length=254)),
('sign', models.BooleanField()),
('create_time', models.DateTimeField(auto_now=True)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sign.Event')),
],
),
migrations.AlterUniqueTogether(
name='guest',
unique_together=set([('event', 'phone')]),
),
]
|
[
"[email protected]"
] | |
a844e2ab5bb2d5944f55a1af4f41746ccd02dfc2
|
c8ebf4217a96c1a3afc978ac94bdc8738f5120a3
|
/staff/utils.py
|
a207c7a173b04ac4d870cc8b2dab4e17ddfa716a
|
[] |
no_license
|
vinux84/dept2
|
240bea22205a2a36a88484e669151996c43e5ba8
|
12a558453b22cf00da0001f15225f1c6b37d71ab
|
refs/heads/master
| 2021-09-06T17:20:36.831910 | 2018-02-08T23:08:06 | 2018-02-08T23:08:06 | 112,055,830 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,411 |
py
|
import random
import string
from django.utils.text import slugify
'''
random_string_generator is located here:
http://joincfe.com/blog/random-string-generator-in-python/
'''
DONT_USE = ["create"]
def random_string_generator(size=10, chars=string.ascii_lowercase + string.digits):
return "".join(random.choice(chars) for _ in range(size))
def unique_slug_generator(instance, new_slug=None): # this instance takes in an object from a model.
"""
This is for a Django project and it assumes your instance
has a model with a slug field and a title character (char) field.
"""
if new_slug is not None:
slug = new_slug
else:
slug = slugify(instance.title) # this could be instance.first_name depending on the model; we left it as title and added a method in models
if slug in DONT_USE:
new_slug = "{slug}-{randstr}".format(
slug=slug,
randstr=random_string_generator(size=4)
)
return unique_slug_generator(instance, new_slug=new_slug)
Klass = instance.__class__
qs_exists = Klass.objects.filter(slug=slug).exists()
if qs_exists:
new_slug = "{slug}-{randstr}".format(
slug=slug,
randstr=random_string_generator(size=4)
)
return unique_slug_generator(instance, new_slug=new_slug)
return slug
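# Hedged usage sketch (the model and receiver names below are illustrative,
# not part of this repo): the generator is typically wired to pre_save.
#
#   from django.db.models.signals import pre_save
#
#   def article_pre_save_receiver(sender, instance, *args, **kwargs):
#       if not instance.slug:
#           instance.slug = unique_slug_generator(instance)
#
#   pre_save.connect(article_pre_save_receiver, sender=Article)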
|
[
"[email protected]"
] | |
ebaed099d674b260f3ebd706f27ed03c9e10f782
|
b211aa36e31107d7530e2126d537fc0576208302
|
/tests/test_api_veiculos.py
|
5700cd20fdc0bac6f2b37343c29a31e70323e85b
|
[] |
no_license
|
jeffersonSA/CadVeiculosAPI
|
0e7ccf3a7b134e2cb259f7a85d16cf371022e3e8
|
74ec501bc2116a2f02eec96e9cbd5ad2359f60a5
|
refs/heads/master
| 2023-04-20T12:58:04.144461 | 2021-05-15T21:40:10 | 2021-05-15T21:40:10 | 366,793,041 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,693 |
py
|
def test_post_deve_retornar_erro_quando_o_payload_for_incompleto(client):
dado = {'veiculo':'Gol','ano':2020,'vendido':True}
esperado = {'marca': ['Missing data for required field.'], 'descricao': ['Missing data for required field.']}
response = client.post('/api/veiculos',json=dado)
assert response.get_json()['message'] == esperado
def test_post_deve_retornar_erro_quando_o_payload_contiver_a_chave_id(client):
dado = {
'veiculo':'Gol',
'ano':2020,
'vendido':True,
'marca': 'VW',
'descricao': 'Novo',
'id': 1
}
esperado = {'id': ['Não é permitido enviar ID']}
response = client.post('/api/veiculos',json=dado)
assert response.get_json()['message'] == esperado
def test_get_deve_retornar_status_200(client):
assert client.get('/api/veiculos').status_code == 200
def test_get_deve_retornar_dado_depois_de_inserir(client):
dado = {
'veiculo':'Gol',
'ano':2020,
'vendido':True,
'marca': 'VW',
'descricao': 'Novo'
}
response = client.post('/api/veiculos',json=dado)
resp_json = response.get_json()
id = resp_json['id']
esperado = resp_json
response = client.get('/api/veiculos/%s' %id )
assert response.get_json() == esperado
def test_get_deve_retornar_dados_usando_qualquer_texto_digitado(client):
dado = [{
'veiculo':'Celta',
'ano':1990,
'vendido':True,
'marca': 'GM',
'descricao': 'Antigo'
},
{
'veiculo':'Corsa',
'ano':1990,
'vendido':True,
'marca': 'GM',
'descricao': 'Antigo'
},
{
'veiculo':'Gol',
'ano':2021,
'vendido':True,
'marca': 'VW',
'descricao': 'Novo'
}]
client.post('/api/veiculos',json=dado[0])
client.post('/api/veiculos',json=dado[1])
client.post('/api/veiculos',json=dado[2])
search_word = 'Anti'
response = client.get('/api/veiculos/find/%s' % search_word)
assert len(response.get_json()) >=2
def test_put_deve_atualizar_dado_adicionado(client):
dado = {
'veiculo':'Gol',
'ano':2020,
'vendido':True,
'marca': 'VW',
'descricao': 'Novo'
}
response = client.post('/api/veiculos',json=dado)
resp_json = response.get_json()
id = resp_json['id']
dado = {
'veiculo':'Golzinho',
'ano':2001,
'vendido':False,
'marca': 'VWs',
'descricao': 'seminovo'
}
response = client.put('/api/veiculos/%s' % id,json=dado)
data_resp = response.get_json()['data']
del data_resp['created'], data_resp['updated'], data_resp['id']
assert data_resp == dado
def test_patch_deve_atualizar_somente_atributo_vendido(client):
dado = {
'veiculo':'Audi',
'ano':2020,
'vendido':False,
'marca': 'Audi',
'descricao': 'Novo'
}
response = client.post('/api/veiculos',json=dado)
resp_json = response.get_json()
id = resp_json['id']
dado = {
'vendido':False
}
response = client.patch('/api/veiculos/%s' % id,json=dado)
data_resp = response.get_json()['data']
assert data_resp['vendido'] == False
def test_delete_deve_mostrar_mensagem_deletado_ao_deletar(client):
dado = {
'veiculo':'Gol',
'ano':2020,
'vendido':True,
'marca': 'VW',
'descricao': 'Novo'
}
response = client.post('/api/veiculos',json=dado)
resp_json = response.get_json()
id = resp_json['id']
response = client.delete('/api/veiculos/%s' % id)
assert response.get_json()['message'] == "Deletado!"
|
[
"[email protected]"
] | |
deb27eae24f4cd46475211751438e904854e037a
|
fcdfe976c9ed60b18def889692a17dc18a8dd6d7
|
/ros/py_ros/kdl_test2.py
|
120f3dc29d4eeaee751accf468dd08397df344f3
|
[] |
no_license
|
akihikoy/ay_test
|
4907470889c9bda11cdc84e8231ef3156fda8bd7
|
a24dfb720960bfedb94be3b4d147e37616e7f39a
|
refs/heads/master
| 2023-09-02T19:24:47.832392 | 2023-08-27T06:45:20 | 2023-08-27T06:45:20 | 181,903,332 | 6 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,517 |
py
|
#!/usr/bin/python
#\file kdl_test2.py
#\brief certain python script
#\author Akihiko Yamaguchi, [email protected]
#\version 0.1
import numpy as np
from kdl_kin import TKinematics
if __name__=='__main__':
np.set_printoptions(precision=3)
print 'Testing TKinematics (robot_description == Yaskawa Motoman is assumed).'
print 'Before executing this script, run:'
print ' rosparam load `rospack find motoman_sia10f_support`/urdf/sia10f.urdf robot_description'
kin= TKinematics(end_link='link_t')
kin.print_robot_description()
DoF= len(kin.joint_names)
q0= [0.0]*DoF
angles= {joint:q0[j] for j,joint in enumerate(kin.joint_names)} #Deserialize
x0= kin.forward_position_kinematics(angles)
print 'q0=',np.array(q0)
print 'x0= FK(q0)=',x0
import random
q1= [3.0*(random.random()-0.5) for j in range(DoF)]
angles= {joint:q1[j] for j,joint in enumerate(kin.joint_names)} #Deserialize
x1= kin.forward_position_kinematics(angles)
print 'q1=',q1
print 'x1= FK(q1)=',x1
seed= [0.0]*DoF
#seed= [3.0*(random.random()-0.5) for j in range(DoF)]
q2= kin.inverse_kinematics(x1[:3], x1[3:], seed=seed, maxiter=2000, eps=1.0e-4) #, maxiter=500, eps=1.0e-6
print 'q2= IK(x1)=',q2
if q2 is not None:
angles= {joint:q2[j] for j,joint in enumerate(kin.joint_names)} #Deserialize
x2= kin.forward_position_kinematics(angles)
print 'x2= FK(q2)=',x2
print 'x2==x1?', np.allclose(x2,x1)
print '|x2-x1|=',np.linalg.norm(x2-x1)
else:
print 'Failed to solve IK.'
|
[
"[email protected]"
] | |
df281f2293585c2a364e111d49a0b6c16f67a4f0
|
518920276b75b7a1c6f4f4cbae83e11f09946351
|
/usedcars1_rodmuesong.py
|
9fe188173c731abf04e25f8c925228f05926ae55
|
[] |
no_license
|
petasus/twocarsale
|
4fd40c3e67b8fa6280d41c57617d8dab663692c6
|
cd21c26721e053f6dc02bc11c5588a863a3b2466
|
refs/heads/master
| 2020-03-27T15:35:14.159112 | 2019-06-28T15:26:29 | 2019-06-28T15:26:29 | 146,727,223 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,911 |
py
|
import requests
from bs4 import BeautifulSoup
from upload_data import uploadToSql as uploadDB
import connect
import datetime
import time
keep_sendlink=[] # collects site links to be passed on to another file
db = connect.conDB()
def get_Type(soup): # vehicle type
detail = soup.select("div.content-col div.item-row span")
j=0
k=1000
backup=[]
for i in detail:
backup.append(i.text.strip())
for i in backup:
if(i == "ประเภทรถ"):
k = j+1
j=j+1
if(k != 1000):
ty = backup[k]
if(ty == "ยี่ห้อ"):
ty = "-"
else:
ty = "-"
print(ty)
#while(True):
# CKsql = """ SELECT id FROM type_car WHERE `name`=%s"""
# c = db.cursor()
# CKExis = c.execute(CKsql,(ty))
# if CKExis:
# getID = c.fetchall()
# return getID[0][0]
# else:
# c.execute("""INSERT INTO type_car (`name`) VALUES (%s)""", (ty))
# db.commit()
# continue
def get_Brand(soup): # brand
detail = soup.select("div.content-col div.item-row span")
j=0
k=1000
backup=[]
for i in detail:
backup.append(i.text.strip())
for i in backup:
if(i == "ยี่ห้อ" ):
k = j+1
j=j+1
if(k != 1000):
br = (backup[k].lower())
if(br == "bugatti"):
br = "Bugatti"
else:
br = "-"
print(br)
#while(True):
# CKsql = """ SELECT id FROM brand WHERE `name`=%s"""
# c = db.cursor()
# CKExis = c.execute(CKsql,(br))
# if CKExis:
# getID = c.fetchall()
# return getID[0][0]
# else:
# c.execute("""INSERT INTO brand (`name`) VALUES (%s)""", (br))
# db.commit()
# continue
def get_Model(soup): # model
detail = soup.select("div.content-col div.item-row span")
j=0
k=1000
backup=[]
for i in detail:
backup.append(i.text.strip())
for i in backup:
if(i == "รุ่น" ):
k = j+1
j=j+1
if(k != 1000):
mod = (backup[k].lower())
else:
mod = "-"
print(mod)
#TypeCar = get_TypeCar(soup)
#Brand = get_Brand(soup)
#Gear = get_Gear(soup)
#while(True):
# CKsql = """ SELECT id FROM model WHERE `name`=%s AND `bnd_id`=%s AND `typ_id`=%s"""
# c = db.cursor()
# CKExis = c.execute(CKsql,(mo,Brand,TypeCar))
# if CKExis:
# getID = c.fetchall()
# return getID[0][0]
# else:
# c.execute("""INSERT INTO model (`name`,`bnd_id`,`typ_id`,`gears`) VALUES (%s,%s,%s,%s)""", (mo,Brand,TypeCar,Gear))
# db.commit()
def get_Submodel(soup): # sub-model
detail = soup.select("div.content-col div.item-row span")
j=0
k=1000
backup=[]
for i in detail:
backup.append(i.text.strip())
for i in backup:
if(i == "รุ่นย่อย" ):
k = j+1
j=j+1
if(k != 1000):
sm = (backup[k].lower())
else:
sm = "-"
print(sm)
def get_Web(soup): # website name
we = 'rodmuesong.com'
print(we)
def get_Post(soup): # post date
detail = soup.select("div.title-page p.info-title")
backup=[]
months = ['ม.ค','ก.พ','มี.ค','เม.ย','พ.ค','มิ.ย','ก.ค','ส.ค','ก.ย','ต.ค','พ.ย','ธ.ค']
for i in detail:
backup.append(i.text.strip())
bu = backup[0].split(" ")
dd = bu[2]
mm = bu[3]
yy = bu[4]
for i in months:
if i == mm:
mm = str(months.index(i)+1)
if(int(mm) <= 9 ):
mm = "0"+str(mm)
if(int(dd) <= 9 ):
dd = "0"+str(dd)
po = (yy +'-'+ mm +'-'+dd)
print(po)
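# Worked example (title format assumed from the indexing above): if tokens
# 2..4 of the title are '12', 'มี.ค' and '2562', the month maps to '03' and
# po becomes '2562-03-12' (day/month zero-padded, Buddhist-era year kept).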
def get_Price(soup): # price
detail = soup.select("div.left-content p.price")
backup=[]
for i in detail:
backup.append(i.text.strip())
bu = backup[0]
if(bu == "ติดต่อผู้ขาย"):
pr = "0"
else:
bu1 = bu.replace("บาท","")
bu2 = bu1.replace(",","")
pr = bu2.replace(" ","")
print(pr)
def get_Location(soup): # province
detail = soup.select("div.title-page p.info-title")
backup=[]
for i in detail:
backup.append(i.text.strip())
bu = backup[0].split(" ")
lo = bu[0]
print(lo)
def get_Year(soup): # model year
detail = soup.select("div.content-col div.item-row span")
j=0
k=1000
backup=[]
for i in detail:
backup.append(i.text.strip())
for i in backup:
if(i == "ปีที่ผลิต" ):
k = j+1
j=j+1
if(k != 1000):
ye = backup[k]
else:
ye = "-"
print(ye)
def get_Mile(soup): # mileage used, in kilometers (km)
detail = soup.select("div.content-col div.item-row span")
j=0
k=1000
backup=[]
for i in detail:
backup.append(i.text.strip())
for i in backup:
if(i == "เลขไมล์" ):
k = j+1
j=j+1
if(k != 1000):
mi = backup[k].replace(",","")
else:
mi = "-"
print(mi)
def get_Color(soup): # car color
detail = soup.select("div.content-col div.item-row span")
j=0
k=1000
backup=[]
for i in detail:
backup.append(i.text.strip())
for i in backup:
if(i == "สี" ):
k = j+1
j=j+1
if(k != 1000):
co = backup[k]
else:
co = "-"
print(co)
def get_Gear(soup): # transmission system
detail = soup.select("div.content-col div.item-row span")
j=0
k=1000
backup=[]
for i in detail:
backup.append(i.text.strip())
for i in backup:
if(i == "ระบบส่งกำลัง" ):
k = j+1
j=j+1
if(k != 1000):
ge = backup[k]
else:
ge = "-"
print(ge)
def get_Seller(soup): # seller name
detail = soup.select("div.col-box h4")
backup=[]
for i in detail:
backup.append(i.text.strip())
bu = backup[0]
if(bu == ''):
se = "-"
else:
se = bu
print(se)
def get_Tel(soup): # seller phone number
detail = soup.select("div.col-box span")
backup=[]
for i in detail:
backup.append(i.text.strip())
te = backup[0].replace(".","")
print(te)
def get_Place(soup): # address
detail = soup.select("div.col-box p")
backup=[]
for i in detail:
backup.append(i.text.strip())
pl = backup[0]
if(pl[0] == "0"):
pl = "-"
print(pl)
def get_description(soup): # details
detail = soup.select("div.description p")
backup=[]
for i in detail:
backup.append(i.text.strip())
de = backup[0]
print(de)
def get_specification(soup): # technical specifications
detail = soup.select("div.box-border")
backup=[]
for i in detail:
backup.append(i.text.strip())
if(backup == []):
sp = "ไม่มีข้อมูล"
else:
sp = backup[0]
print(sp)
def get_Image(soup):
detail = soup.select("a.imageGallery img")
j=0
k=0
im=""
backup=[]
for i in detail:
backup.append(i['src'])
j+=1
if(j==0):
im = "-"
else:
while(k != j):
im += backup[k]+" "
k+=1
print(im)
def get_CheckUpdate(soup):
detail = soup.select("div.title-page p.info-title")
backup=[]
months = ['ม.ค','ก.พ','มี.ค','เม.ย','พ.ค','มิ.ย','ก.ค','ส.ค','ก.ย','ต.ค','พ.ย','ธ.ค']
for i in detail:
backup.append(i.text.strip())
print(backup)
if(backup == []):
chd = 0
else:
bu = backup[0].split(" ")
dd = bu[2]
mm = bu[3]
yy = bu[4]
yy = int(yy)-2543
for i in months:
if(i == mm):
mm = str(months.index(i)+1)
if(int(mm) <= 9 ):
mm = "0"+str(mm)
if(int(dd) <= 9 ):
dd = "0"+str(dd)
day = str(mm)+"/"+str(dd)+"/"+str(yy)
xx = datetime.datetime.now()
xd = xx.strftime("%x")
if(day == xd):
chd = 0
else:
chd = 1
print(chd)
return(chd)
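# Worked check: a Buddhist-era year such as 2562 becomes 2562 - 2543 = 19,
# matching the two-digit year of datetime's "%x" format (e.g. '03/12/19'),
# so 'day' and 'xd' compare like-for-like.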
def get_ErrorCheck(soup):
detail = soup.select("div.title h4.fweight-bold")
backup=[]
for i in detail:
backup.append(i.text.strip())
if(backup == []):
bu = 1
else:
bu = 0
print(bu)
return(bu)
def Main(links):
#Car_upload=[]
j=1
for i in links:
print("link no." + str(j) + " " + i)
while True:
try:
r = requests.get(i)
break
except:
print("มีปัญหากลับไปรีเควสใหม่")
print("ที่ลิ้ง: "+str(i))
time.sleep(8)
continue
soup = BeautifulSoup(r.text, "lxml")
j+=1
CarDetail = {}
CarDetail['err'] = get_ErrorCheck(soup)
if(CarDetail['err']== 0):
continue
CarDetail['che'] = get_CheckUpdate(soup)
if(CarDetail['che']== 0):
continue
#CarDetail['typ'] = get_Type(soup)###
#CarDetail['bra'] = get_Brand(soup)###
#CarDetail['mod'] = get_Model(soup)###
#CarDetail['sub'] = get_Submodel(soup)###
#CarDetail['gea'] = get_Gear(soup)###
CarDetail['web'] = get_Web(soup)
CarDetail['pos'] = get_Post(soup)
CarDetail['pri'] = get_Price(soup)
CarDetail['loc'] = get_Location(soup)
CarDetail['yea'] = get_Year(soup)
CarDetail['mil'] = get_Mile(soup)
CarDetail['col'] = get_Color(soup)
CarDetail['sel'] = get_Seller(soup)
CarDetail['tel'] = get_Tel(soup)
CarDetail['pla'] = get_Place(soup)
CarDetail['des'] = get_description(soup)
###CarDetail['cla'] = get_description(soup)# accident / crash / flood / modified / gas-fitted
###CarDetail['pro'] = get_description(soup)# promotions, discounts, down payment
###CarDetail['ser'] = get_description(soup)# after-sales warranty
CarDetail['spe'] = get_specification(soup)
CarDetail['img'] = get_Image(soup)
###CarDetail['dup'] = get_duplicate(soup) # duplicate check
###CarDetail['upd'] = get_update(soup) #updatedatabase
#Car_upload.append(CarDetail)
#uploadDB(Car_upload)
def getLink():
print("Start getLink")
url_to_scrape = 'https://rodmuesong.com/รถสำหรับขาย/p1' #website
while True:
try:
r = requests.get(url_to_scrape)
break
except:
print("มีปัญหากลับไปรีเควสใหม่")
print("ที่ลิ้ง: "+str(url_to_scrape))
time.sleep(2)
continue
soup = BeautifulSoup(r.text, "lxml")
num_car = soup.select("span.result") # total number of cars
for i in num_car: # loop to find the maximum number of pages
k = i.text.strip().split(" ")
k = k[1].replace(",","")
maxpage = (int(k)//10)+1
print(maxpage)
count=maxpage #maxpage 12479
num=1
j=0
while(num != count):
print("page "+str(num))
url_num = 'https://rodmuesong.com/รถสำหรับขาย/p'+str(num)+''
while True:
try:
r = requests.get(url_num)
break
except:
print("มีปัญหากลับไปรีเควสใหม่")
print("ที่ลิ้ง: "+str(url_num))
time.sleep(3)
continue
soup = BeautifulSoup(r.text,"lxml")
url_linkcar = soup.select("div.content-page div.row div.thumb-img a") # link for each car listing
for i in url_linkcar:
print("link "+str(j+1)+i['href'])
keep_sendlink.append('https://rodmuesong.com'+i['href'])
j+=1
num+=1
print("End getLink")
def getSendLink():
print("Start Rodmuesong")
getLink()
print("Start getSendLink")
Main(keep_sendlink)
print("End getSendLink")
print("End Rodmuesong")
getSendLink()
|
[
"[email protected]"
] | |
a3ddf601d4a7e710a877f8f29f465f4233f64e46
|
69110d606c5075698620af1c060a08aea6bc83d6
|
/user_messages/user_messages/models.py
|
b41a1f83f0fb4234018a4597ddaaa3b7da69590f
|
[] |
no_license
|
anusha-vijaykumar/NextNeighbour
|
258662023498a88e786615886aafbc1a5459b479
|
682142bd196a27e7b65969e889e3b20eb790a0da
|
refs/heads/master
| 2021-01-10T13:49:18.619431 | 2016-03-08T03:41:31 | 2016-03-08T03:41:31 | 53,366,102 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,795 |
py
|
from datetime import datetime
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from user_messages.managers import ThreadManager, MessageManager
from user_messages.utils import cached_attribute
class Thread(models.Model):
subject = models.CharField(max_length=150)
users = models.ManyToManyField(User, through="UserThread")
objects = ThreadManager()
def get_absolute_url(self):
return reverse("messages_thread_detail", kwargs={"thread_id": self.pk})
@property
@cached_attribute
def first_message(self):
return self.messages.all()[0]
@property
@cached_attribute
def latest_message(self):
return self.messages.order_by("-sent_at")[0]
@classmethod
def ordered(cls, objs):
"""
Returns the iterable ordered the correct way; this is a class method
because we don't know what the type of the iterable will be.
"""
objs = list(objs)
objs.sort(key=lambda o: o.latest_message.sent_at, reverse=True)
return objs
class UserThread(models.Model):
thread = models.ForeignKey(Thread)
user = models.ForeignKey(User)
unread = models.BooleanField()
deleted = models.BooleanField()
class Message(models.Model):
thread = models.ForeignKey(Thread, related_name="messages")
sender = models.ForeignKey(User, related_name="sent_messages")
sent_at = models.DateTimeField(default=timezone.now)
content = models.TextField()
objects = MessageManager()
class Meta:
ordering = ("sent_at",)
def get_absolute_url(self):
return self.thread.get_absolute_url()
|
[
"[email protected]"
] | |
f788c19f9b7cd4e666685b717504ad5e80a33c46
|
0d87c9a03cf5874851cc7923669e2b7a3241c630
|
/gluoncv/torch/model_zoo/action_recognition/i3d_slow.py
|
e1f8173ccee17096ca1ddc6652c2eda916921adb
|
[
"Apache-2.0"
] |
permissive
|
Kh4L/gluon-cv
|
0f5a771ac44059ac8d562f5ae4502698b1537ee4
|
849411ed56632cd854850b07142087d599f97dcb
|
refs/heads/master
| 2021-06-28T21:39:38.859719 | 2020-11-21T01:18:53 | 2020-11-21T01:18:53 | 186,614,638 | 0 | 0 |
Apache-2.0
| 2019-05-14T12:11:12 | 2019-05-14T12:11:12 | null |
UTF-8
|
Python
| false | false | 20,813 |
py
|
# pylint: disable=missing-function-docstring, line-too-long
"""
SlowFast Networks for Video Recognition
ICCV 2019, https://arxiv.org/abs/1812.03982
Code adapted from https://github.com/open-mmlab/mmaction and
https://github.com/decisionforce/TPN
"""
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from .non_local import build_nonlocal_block
__all__ = ['ResNet_SlowFast', 'i3d_slow_resnet50_f32s2_kinetics400',
'i3d_slow_resnet50_f16s4_kinetics400', 'i3d_slow_resnet50_f8s8_kinetics400',
'i3d_slow_resnet101_f32s2_kinetics400', 'i3d_slow_resnet101_f16s4_kinetics400',
'i3d_slow_resnet101_f8s8_kinetics400', 'i3d_slow_resnet50_f32s2_custom']
def conv3x3x3(in_planes, out_planes, spatial_stride=1, temporal_stride=1, dilation=1):
"3x3x3 convolution with padding"
return nn.Conv3d(in_planes,
out_planes,
kernel_size=3,
stride=(temporal_stride, spatial_stride, spatial_stride),
padding=dilation,
dilation=dilation,
bias=False)
def conv1x3x3(in_planes, out_planes, spatial_stride=1, temporal_stride=1, dilation=1):
"1x3x3 convolution with padding"
return nn.Conv3d(in_planes,
out_planes,
kernel_size=(1, 3, 3),
stride=(temporal_stride, spatial_stride, spatial_stride),
padding=(0, dilation, dilation),
dilation=dilation,
bias=False)
class Bottleneck(nn.Module):
"""Bottleneck block for ResNet.
If style is "pytorch", the stride-two layer is the 3x3 conv layer,
if it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
expansion = 4
def __init__(self,
inplanes,
planes,
spatial_stride=1,
temporal_stride=1,
dilation=1,
downsample=None,
style='pytorch',
if_inflate=True,
inflate_style='3x1x1',
if_nonlocal=True,
nonlocal_cfg=None,
with_cp=False):
super(Bottleneck, self).__init__()
assert style in ['pytorch', 'caffe']
assert inflate_style in ['3x1x1', '3x3x3']
self.inplanes = inplanes
self.planes = planes
if style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = spatial_stride
self.conv1_stride_t = 1
self.conv2_stride_t = temporal_stride
else:
self.conv1_stride = spatial_stride
self.conv2_stride = 1
self.conv1_stride_t = temporal_stride
self.conv2_stride_t = 1
if if_inflate:
if inflate_style == '3x1x1':
self.conv1 = nn.Conv3d(
inplanes,
planes,
kernel_size=(3, 1, 1),
stride=(self.conv1_stride_t, self.conv1_stride, self.conv1_stride),
padding=(1, 0, 0),
bias=False)
self.conv2 = nn.Conv3d(
planes,
planes,
kernel_size=(1, 3, 3),
stride=(self.conv2_stride_t, self.conv2_stride, self.conv2_stride),
padding=(0, dilation, dilation),
dilation=(1, dilation, dilation),
bias=False)
else:
self.conv1 = nn.Conv3d(
inplanes,
planes,
kernel_size=1,
stride=(self.conv1_stride_t, self.conv1_stride, self.conv1_stride),
bias=False)
self.conv2 = nn.Conv3d(
planes,
planes,
kernel_size=3,
stride=(self.conv2_stride_t, self.conv2_stride, self.conv2_stride),
padding=(1, dilation, dilation),
dilation=(1, dilation, dilation),
bias=False)
else:
self.conv1 = nn.Conv3d(
inplanes,
planes,
kernel_size=1,
stride=(1, self.conv1_stride, self.conv1_stride),
bias=False)
self.conv2 = nn.Conv3d(
planes,
planes,
kernel_size=(1, 3, 3),
stride=(1, self.conv2_stride, self.conv2_stride),
padding=(0, dilation, dilation),
dilation=(1, dilation, dilation),
bias=False)
self.bn1 = nn.BatchNorm3d(planes)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = nn.Conv3d(
planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm3d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.spatial_stride = spatial_stride
self.temporal_stride = temporal_stride
self.dilation = dilation
self.with_cp = with_cp
if if_nonlocal and nonlocal_cfg is not None:
nonlocal_cfg_ = nonlocal_cfg.copy()
nonlocal_cfg_['in_channels'] = planes * self.expansion
self.nonlocal_block = build_nonlocal_block(nonlocal_cfg_)
else:
self.nonlocal_block = None
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
if self.nonlocal_block is not None:
out = self.nonlocal_block(out)
return out
def make_res_layer(block,
inplanes,
planes,
blocks,
spatial_stride=1,
temporal_stride=1,
dilation=1,
style='pytorch',
inflate_freq=1,
inflate_style='3x1x1',
nonlocal_freq=1,
nonlocal_cfg=None,
with_cp=False):
inflate_freq = inflate_freq if not isinstance(inflate_freq, int) else (inflate_freq,) * blocks
nonlocal_freq = nonlocal_freq if not isinstance(nonlocal_freq, int) else (nonlocal_freq,) * blocks
assert len(inflate_freq) == blocks
assert len(nonlocal_freq) == blocks
downsample = None
if spatial_stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv3d(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=(temporal_stride, spatial_stride, spatial_stride),
bias=False),
nn.BatchNorm3d(planes * block.expansion),
)
layers = []
layers.append(
block(
inplanes,
planes,
spatial_stride,
temporal_stride,
dilation,
downsample,
style=style,
if_inflate=(inflate_freq[0] == 1),
inflate_style=inflate_style,
if_nonlocal=(nonlocal_freq[0] == 1),
nonlocal_cfg=nonlocal_cfg,
with_cp=with_cp))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(inplanes,
planes,
1, 1,
dilation,
style=style,
if_inflate=(inflate_freq[i] == 1),
inflate_style=inflate_style,
if_nonlocal=(nonlocal_freq[i] == 1),
nonlocal_cfg=nonlocal_cfg,
with_cp=with_cp))
return nn.Sequential(*layers)
class ResNet_SlowFast(nn.Module):
"""ResNe(x)t_SlowFast backbone.
Args:
depth (int): Depth of resnet, from {50, 101, 152}.
num_stages (int): Resnet stages, normally 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
bn_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var).
bn_frozen (bool): Whether to freeze weight and bias of BN layers.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
num_classes,
depth,
pretrained=None,
pretrained_base=True,
feat_ext=False,
num_stages=4,
spatial_strides=(1, 2, 2, 2),
temporal_strides=(1, 1, 1, 1),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
conv1_kernel_t=1,
conv1_stride_t=1,
pool1_kernel_t=1,
pool1_stride_t=1,
style='pytorch',
frozen_stages=-1,
inflate_freq=(0, 0, 1, 1),
inflate_stride=(1, 1, 1, 1),
inflate_style='3x1x1',
nonlocal_stages=(-1,),
nonlocal_freq=(0, 0, 0, 0),
nonlocal_cfg=None,
bn_eval=False,
bn_frozen=False,
partial_bn=False,
with_cp=False,
dropout_ratio=0.5,
init_std=0.01):
super(ResNet_SlowFast, self).__init__()
if depth not in self.arch_settings:
raise KeyError('invalid depth {} for resnet'.format(depth))
self.num_classes = num_classes
self.depth = depth
self.pretrained = pretrained
self.pretrained_base = pretrained_base
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.spatial_strides = spatial_strides
self.temporal_strides = temporal_strides
self.dilations = dilations
assert len(spatial_strides) == len(temporal_strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.style = style
self.frozen_stages = frozen_stages
self.inflate_freqs = inflate_freq if not isinstance(inflate_freq, int) else (inflate_freq,) * num_stages
self.inflate_style = inflate_style
self.nonlocal_stages = nonlocal_stages
self.nonlocal_freqs = nonlocal_freq if not isinstance(nonlocal_freq, int) else (nonlocal_freq,) * num_stages
self.nonlocal_cfg = nonlocal_cfg
self.bn_eval = bn_eval
self.bn_frozen = bn_frozen
self.partial_bn = partial_bn
self.with_cp = with_cp
self.feat_ext = feat_ext
self.dropout_ratio = dropout_ratio
self.init_std = init_std
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = 64
self.conv1 = nn.Conv3d(
3, 64, kernel_size=(conv1_kernel_t, 7, 7), stride=(conv1_stride_t, 2, 2),
padding=((conv1_kernel_t - 1) // 2, 3, 3), bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=(pool1_kernel_t, 3, 3), stride=(pool1_stride_t, 2, 2),
padding=(pool1_kernel_t // 2, 1, 1))
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
spatial_stride = spatial_strides[i]
temporal_stride = temporal_strides[i]
dilation = dilations[i]
planes = 64 * 2 ** i
res_layer = make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
spatial_stride=spatial_stride,
temporal_stride=temporal_stride,
dilation=dilation,
style=self.style,
inflate_freq=self.inflate_freqs[i],
inflate_style=self.inflate_style,
nonlocal_freq=self.nonlocal_freqs[i],
nonlocal_cfg=self.nonlocal_cfg if i in self.nonlocal_stages else None,
with_cp=with_cp)
self.inplanes = planes * self.block.expansion
layer_name = 'layer{}'.format(i + 1)
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.block.expansion * 64 * 2 ** (len(self.stage_blocks) - 1)
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
self.avg_pool = nn.AdaptiveAvgPool3d(1)
self.fc = nn.Linear(in_features=2048, out_features=num_classes)
if not self.pretrained:
nn.init.normal_(self.fc.weight, 0, self.init_std)
nn.init.constant_(self.fc.bias, 0)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avg_pool(x)
if self.dropout is not None:
x = self.dropout(x)
x = x.view(x.size(0), -1)
if self.feat_ext:
return x
out = self.fc(x)
return out
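# Hedged usage sketch (shapes are illustrative): the slow pathway expects a
# clip of shape (batch, channels, frames, height, width).
#
#   import torch
#   net = ResNet_SlowFast(num_classes=400, depth=50, pretrained=None)
#   clip = torch.randn(1, 3, 8, 224, 224)  # 8 frames from one video
#   logits = net(clip)                     # -> torch.Size([1, 400])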
def i3d_slow_resnet50_f32s2_kinetics400(cfg):
model = ResNet_SlowFast(num_classes=cfg.CONFIG.DATA.NUM_CLASSES,
depth=50,
pretrained=cfg.CONFIG.MODEL.PRETRAINED,
pretrained_base=cfg.CONFIG.MODEL.PRETRAINED_BASE,
feat_ext=cfg.CONFIG.INFERENCE.FEAT,
bn_eval=cfg.CONFIG.MODEL.BN_EVAL,
partial_bn=cfg.CONFIG.MODEL.PARTIAL_BN,
bn_frozen=cfg.CONFIG.MODEL.BN_FROZEN)
if cfg.CONFIG.MODEL.PRETRAINED:
from ..model_store import get_model_file
model.load_state_dict(torch.load(get_model_file('i3d_slow_resnet50_f32s2_kinetics400',
tag=cfg.CONFIG.MODEL.PRETRAINED)))
return model
def i3d_slow_resnet50_f16s4_kinetics400(cfg):
model = ResNet_SlowFast(num_classes=cfg.CONFIG.DATA.NUM_CLASSES,
depth=50,
pretrained=cfg.CONFIG.MODEL.PRETRAINED,
pretrained_base=cfg.CONFIG.MODEL.PRETRAINED_BASE,
feat_ext=cfg.CONFIG.INFERENCE.FEAT,
bn_eval=cfg.CONFIG.MODEL.BN_EVAL,
partial_bn=cfg.CONFIG.MODEL.PARTIAL_BN,
bn_frozen=cfg.CONFIG.MODEL.BN_FROZEN)
if cfg.CONFIG.MODEL.PRETRAINED:
from ..model_store import get_model_file
model.load_state_dict(torch.load(get_model_file('i3d_slow_resnet50_f16s4_kinetics400',
tag=cfg.CONFIG.MODEL.PRETRAINED)))
return model
def i3d_slow_resnet50_f8s8_kinetics400(cfg):
model = ResNet_SlowFast(num_classes=cfg.CONFIG.DATA.NUM_CLASSES,
depth=50,
pretrained=cfg.CONFIG.MODEL.PRETRAINED,
pretrained_base=cfg.CONFIG.MODEL.PRETRAINED_BASE,
feat_ext=cfg.CONFIG.INFERENCE.FEAT,
bn_eval=cfg.CONFIG.MODEL.BN_EVAL,
partial_bn=cfg.CONFIG.MODEL.PARTIAL_BN,
bn_frozen=cfg.CONFIG.MODEL.BN_FROZEN)
if cfg.CONFIG.MODEL.PRETRAINED:
from ..model_store import get_model_file
model.load_state_dict(torch.load(get_model_file('i3d_slow_resnet50_f8s8_kinetics400',
tag=cfg.CONFIG.MODEL.PRETRAINED)))
return model
def i3d_slow_resnet101_f32s2_kinetics400(cfg):
model = ResNet_SlowFast(num_classes=cfg.CONFIG.DATA.NUM_CLASSES,
depth=101,
pretrained=cfg.CONFIG.MODEL.PRETRAINED,
pretrained_base=cfg.CONFIG.MODEL.PRETRAINED_BASE,
feat_ext=cfg.CONFIG.INFERENCE.FEAT,
bn_eval=cfg.CONFIG.MODEL.BN_EVAL,
partial_bn=cfg.CONFIG.MODEL.PARTIAL_BN,
bn_frozen=cfg.CONFIG.MODEL.BN_FROZEN)
if cfg.CONFIG.MODEL.PRETRAINED:
from ..model_store import get_model_file
model.load_state_dict(torch.load(get_model_file('i3d_slow_resnet101_f32s2_kinetics400',
tag=cfg.CONFIG.MODEL.PRETRAINED)))
return model
def i3d_slow_resnet101_f16s4_kinetics400(cfg):
model = ResNet_SlowFast(num_classes=cfg.CONFIG.DATA.NUM_CLASSES,
depth=101,
pretrained=cfg.CONFIG.MODEL.PRETRAINED,
pretrained_base=cfg.CONFIG.MODEL.PRETRAINED_BASE,
feat_ext=cfg.CONFIG.INFERENCE.FEAT,
bn_eval=cfg.CONFIG.MODEL.BN_EVAL,
partial_bn=cfg.CONFIG.MODEL.PARTIAL_BN,
bn_frozen=cfg.CONFIG.MODEL.BN_FROZEN)
if cfg.CONFIG.MODEL.PRETRAINED:
from ..model_store import get_model_file
model.load_state_dict(torch.load(get_model_file('i3d_slow_resnet101_f16s4_kinetics400',
tag=cfg.CONFIG.MODEL.PRETRAINED)))
return model
def i3d_slow_resnet101_f8s8_kinetics400(cfg):
model = ResNet_SlowFast(num_classes=cfg.CONFIG.DATA.NUM_CLASSES,
depth=101,
pretrained=cfg.CONFIG.MODEL.PRETRAINED,
pretrained_base=cfg.CONFIG.MODEL.PRETRAINED_BASE,
feat_ext=cfg.CONFIG.INFERENCE.FEAT,
bn_eval=cfg.CONFIG.MODEL.BN_EVAL,
partial_bn=cfg.CONFIG.MODEL.PARTIAL_BN,
bn_frozen=cfg.CONFIG.MODEL.BN_FROZEN)
if cfg.CONFIG.MODEL.PRETRAINED:
from ..model_store import get_model_file
model.load_state_dict(torch.load(get_model_file('i3d_slow_resnet101_f8s8_kinetics400',
tag=cfg.CONFIG.MODEL.PRETRAINED)))
return model
def i3d_slow_resnet50_f32s2_custom(cfg):
model = ResNet_SlowFast(num_classes=cfg.CONFIG.DATA.NUM_CLASSES,
depth=50,
pretrained=cfg.CONFIG.MODEL.PRETRAINED,
pretrained_base=cfg.CONFIG.MODEL.PRETRAINED_BASE,
feat_ext=cfg.CONFIG.INFERENCE.FEAT,
bn_eval=cfg.CONFIG.MODEL.BN_EVAL,
partial_bn=cfg.CONFIG.MODEL.PARTIAL_BN,
bn_frozen=cfg.CONFIG.MODEL.BN_FROZEN)
if cfg.CONFIG.MODEL.PRETRAINED:
from ..model_store import get_model_file
state_dict = torch.load(get_model_file('i3d_slow_resnet50_f32s2_kinetics400', tag=cfg.CONFIG.MODEL.PRETRAINED))
for k in list(state_dict.keys()):
# retain only backbone up to before the classification layer
if k.startswith('fc'):
del state_dict[k]
msg = model.load_state_dict(state_dict, strict=False)
assert set(msg.missing_keys) == {'fc.weight', 'fc.bias'}
print("=> Initialized from a I3D_slow model pretrained on Kinetcis400 dataset")
return model
|
[
"[email protected]"
] | |
73363d48a6cac54ee344107bc25fcafb7a2ad9ea
|
276578a3f4747879cbe99425e15daf100fa4e4c2
|
/to_do_api/models.py
|
fd9af7ab348fd45b6ddb4b8da500f55557519a2d
|
[] |
no_license
|
nadersayed22/to-do-task
|
38d5ee06c6258e7ea4ef94bf7d82539b71cf4146
|
1d151918b93eaaa40826a405de9322bdbe82efc2
|
refs/heads/master
| 2023-04-17T12:13:30.968495 | 2021-04-28T23:15:21 | 2021-04-28T23:15:21 | 356,759,508 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,249 |
py
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django_timestamps.softDeletion import SoftDeletionModel
from django_timestamps.timestamps import TimestampsModel
# Create your models here.
class UserProfileManager(BaseUserManager):
""""
class required by django for managing our users from management command
"""
def create_user(self, email, password=None):
if not email:
raise ValueError("Users Must Have EMail Address")
# create a new user object
user = self.model(
email=self.normalize_email(email),
)
# set the new password
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password=None):
""""
create and save new superuser with given details
"""
# reuse create_user, then elevate the account
user = self.create_user(email, password)
# make this user an admin
user.is_superuser = True
user.set_password(password)
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""
a user profile in our system
"""
email = models.EmailField(max_length=255, unique=True)
is_superuser = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
class Meta:
verbose_name = "User Profile"
verbose_name_plural = "User Profiles"
def __str__(self):
"""
What to show when we output an object as a string
"""
return self.email
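# Hedged note (app label assumed from the file path): to have Django use this
# model for authentication, the project settings would typically declare
#
#   AUTH_USER_MODEL = 'to_do_api.UserProfile'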
class Task(models.Model):
"""
model to create single task
"""
body = models.TextField(max_length=1000)
completed = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
def __str__(self):
"""
What to show when we output an object as a string
"""
return self.body
|
[
"[email protected]"
] | |
cc9c24223afa3233535419d0963c36d03937434c
|
f973fb52affe135e725e157c0df95324975ef795
|
/Telecom-Customer-Churn/docs/conf.py
|
d86edd5a017426b310948e9409a59f94b52dbabc
|
[] |
no_license
|
Pasoosh/CustomerChurn
|
efae2fb489890a48a2324bb05273c2f9b4dad787
|
89772d8d2be572e88eae0ee3fe11500ffb38284e
|
refs/heads/master
| 2020-12-13T05:03:38.164118 | 2020-02-11T03:12:02 | 2020-02-11T03:12:02 | 234,318,800 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,936 |
py
|
# -*- coding: utf-8 -*-
#
# Telecom-Customer-Churn documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Telecom-Customer-Churn'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Telecom-Customer-Churndoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'Telecom-Customer-Churn.tex',
u'Telecom-Customer-Churn Documentation',
u"Leonardo dos Passos", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'Telecom-Customer-Churn', u'Telecom-Customer-Churn Documentation',
[u"Leonardo dos Passos"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Telecom-Customer-Churn', u'Telecom-Customer-Churn Documentation',
u"Leonardo dos Passos", 'Telecom-Customer-Churn',
'Use the data to try to predict customer churn', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
[
"[email protected]"
] | |
1dcc63d83716fd495907c607354e43e5e0f8ea87
|
4210f8105f9e9a097e3544d15ec2c859bc737bf9
|
/dictpandas.py
|
3e4d82f99249e166d2408c4f08d1a3feff320049
|
[] |
no_license
|
Taranjeet0874/Python
|
f56fb4f39a4a9df22d16c37e5ed808d80c6641c3
|
5749a394099f26fd0f15fe2674035053c297adcb
|
refs/heads/master
| 2022-11-26T21:30:13.044112 | 2020-08-01T12:22:06 | 2020-08-01T12:22:06 | 284,251,181 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 86 |
py
|
import pandas as pd
dic={'a':7,'b':5,'c':9,'d':2}
series = pd.Series(dic) # avoid shadowing the built-in name `dict`
print(series)
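# Expected output (keys become the index, values the data):
# a    7
# b    5
# c    9
# d    2
# dtype: int64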
|
[
"[email protected]"
] | |
c008d92d5264518d006a4ff9b43acef4f19e4c38
|
30b004cad2c14b47b5f66c3a4a0015e05ca4a27e
|
/contrib/data_safety_training/image_classification/submitter.py
|
920b60ad8fed2d7ff0b13d17001d8227f3b0abb8
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleFL
|
66c26f774eeadc25c12e74056ac389e0c1f61b84
|
dcc00c5dff62c3dd0092801f4e9b89d8c0957d3d
|
refs/heads/master
| 2023-08-07T22:05:24.806573 | 2023-03-21T01:15:10 | 2023-03-21T01:15:10 | 210,873,203 | 486 | 136 |
Apache-2.0
| 2023-07-26T22:30:57 | 2019-09-25T15:01:39 |
Python
|
UTF-8
|
Python
| false | false | 1,090 |
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zmq
import socket
import msgpack
import os
mission_dict = {"mission": "image classification", "image_size": [3, 32, 32]}
#send request
context = zmq.Context()
zmq_socket = context.socket(zmq.REQ)
zmq_socket.connect("tcp://127.0.0.1:60001")
zmq_socket.send(msgpack.dumps(mission_dict))
#get and download encoder
file = zmq_socket.recv()
os.system("wget 127.0.0.1:8080/{}".format(file))
#data encoding
os.system("python -u user.py > user.log")
zmq_socket.send("complete")
|
[
"[email protected]"
] | |
a5c34785b6e67c5d99db3841e937400c96a08ec5
|
039ef0d50d998ae083ac079c2c4abab50f7b6cc1
|
/src/pyfluid/fluidsimulationsavestate.py
|
b1704097e255faabe5c244dff72dd95ccb83fe43
|
[
"MIT"
] |
permissive
|
Computational-Fluid-Dynamics/Fluid-Simulation-for-Computer-Graphics
|
37fc2dff5ebe9d3433ba78f1ea171471a8e7c24b
|
1df6268e0a7563e3aedecd7bdee4258d41d4e729
|
refs/heads/master
| 2023-03-21T15:40:58.085143 | 2020-08-27T08:06:33 | 2020-08-27T08:06:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,061 |
py
|
import ctypes
from ctypes import c_void_p, c_char_p, c_int, c_double, byref
from .pyfluid import pyfluid as lib
from .vector3 import Vector3, Vector3_t
from .gridindex import GridIndex, GridIndex_t
from . import pybindings as pb
def _check_load_state_initialized(func):
def wrapper(*args, **kwargs):
self = args[0]
if isinstance(self, FluidSimulationSaveState):
self._check_load_state()
return func(*args, **kwargs)
return wrapper
class FluidSimulationSaveState(object):
def __init__(self):
libfunc = lib.FluidSimulationSaveState_new
pb.init_lib_func(libfunc, [c_void_p], c_void_p)
self._obj = pb.execute_lib_func(libfunc, [])
def __del__(self):
libfunc = lib.FluidSimulationSaveState_destroy
pb.init_lib_func(libfunc, [c_void_p], None)
try:
libfunc(self._obj)
except:
pass
def __call__(self):
return self._obj
def save_state(self, filename, fluidsimulation):
libfunc = lib.FluidSimulationSaveState_save_state
pb.init_lib_func(libfunc, [c_void_p, c_char_p, c_void_p, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), filename, fluidsimulation()])
def load_state(self, filename):
libfunc = lib.FluidSimulationSaveState_load_state
pb.init_lib_func(libfunc, [c_void_p, c_char_p, c_void_p], c_int)
return bool(pb.execute_lib_func(libfunc, [self(), filename]))
def close_state(self):
libfunc = lib.FluidSimulationSaveState_close_state
pb.init_lib_func(libfunc, [c_void_p, c_void_p], None)
pb.execute_lib_func(libfunc, [self()])
@_check_load_state_initialized
def get_grid_dimensions(self):
i = ctypes.c_int()
j = ctypes.c_int()
k = ctypes.c_int()
success = ctypes.c_int()
libfunc = lib.FluidSimulationSaveState_get_grid_dimensions
pb.init_lib_func(libfunc,
[c_void_p, c_void_p, c_void_p, c_void_p, c_void_p], None)
libfunc(self(), byref(i), byref(j), byref(k), byref(success))
pb.check_success(success, libfunc.__name__ + " - ")
return GridIndex(i.value, j.value, k.value)
@_check_load_state_initialized
def get_cell_size(self):
libfunc = lib.FluidSimulationSaveState_get_cell_size
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_double)
return pb.execute_lib_func(libfunc, [self()])
@_check_load_state_initialized
def get_current_frame(self):
libfunc = lib.FluidSimulationSaveState_get_current_frame
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_int)
return pb.execute_lib_func(libfunc, [self()])
@_check_load_state_initialized
def get_num_marker_particles(self):
libfunc = lib.FluidSimulationSaveState_get_num_marker_particles
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_int)
return pb.execute_lib_func(libfunc, [self()])
@_check_load_state_initialized
def get_num_diffuse_particles(self):
libfunc = lib.FluidSimulationSaveState_get_num_diffuse_particles
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_int)
return pb.execute_lib_func(libfunc, [self()])
@_check_load_state_initialized
def get_num_solid_cells(self):
libfunc = lib.FluidSimulationSaveState_get_num_solid_cells
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_int)
return pb.execute_lib_func(libfunc, [self()])
@_check_load_state_initialized
def get_marker_particle_positions(self, startidx = None, endidx = None):
nparticles = self.get_num_marker_particles()
startidx, endidx = self._check_range(startidx, endidx, 0, nparticles)
n = endidx - startidx
out = (Vector3_t * n)()
libfunc = lib.FluidSimulationSaveState_get_marker_particle_positions
pb.init_lib_func(libfunc,
[c_void_p, c_int, c_int, c_void_p, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), startidx, endidx, out])
return out
@_check_load_state_initialized
def get_marker_particle_velocities(self, startidx = None, endidx = None):
nparticles = self.get_num_marker_particles()
startidx, endidx = self._check_range(startidx, endidx, 0, nparticles)
n = endidx - startidx
out = (Vector3_t * n)()
libfunc = lib.FluidSimulationSaveState_get_marker_particle_velocities
pb.init_lib_func(libfunc,
[c_void_p, c_int, c_int, c_void_p, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), startidx, endidx, out])
return out
@_check_load_state_initialized
def get_diffuse_particle_positions(self, startidx = None, endidx = None):
nparticles = self.get_num_diffuse_particles()
startidx, endidx = self._check_range(startidx, endidx, 0, nparticles)
n = endidx - startidx
out = (Vector3_t * n)()
libfunc = lib.FluidSimulationSaveState_get_diffuse_particle_positions
pb.init_lib_func(libfunc,
[c_void_p, c_int, c_int, c_void_p, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), startidx, endidx, out])
return out
@_check_load_state_initialized
def get_diffuse_particle_velocities(self, startidx = None, endidx = None):
nparticles = self.get_num_diffuse_particles()
startidx, endidx = self._check_range(startidx, endidx, 0, nparticles)
n = endidx - startidx
out = (Vector3_t * n)()
libfunc = lib.FluidSimulationSaveState_get_diffuse_particle_velocities
pb.init_lib_func(libfunc,
[c_void_p, c_int, c_int, c_void_p, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), startidx, endidx, out])
return out
@_check_load_state_initialized
def get_diffuse_particle_lifetimes(self, startidx = None, endidx = None):
nparticles = self.get_num_diffuse_particles()
startidx, endidx = self._check_range(startidx, endidx, 0, nparticles)
n = endidx - startidx
out = (ctypes.c_float * n)()
libfunc = lib.FluidSimulationSaveState_get_diffuse_particle_lifetimes
pb.init_lib_func(libfunc,
[c_void_p, c_int, c_int, c_void_p, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), startidx, endidx, out])
lifetimes = [0.0]*n
for i in range(n):
lifetimes[i] = out[i]
return lifetimes
@_check_load_state_initialized
def get_diffuse_particle_types(self, startidx = None, endidx = None):
nparticles = self.get_num_diffuse_particles()
startidx, endidx = self._check_range(startidx, endidx, 0, nparticles)
n = endidx - startidx
out = (ctypes.c_char * n)()
libfunc = lib.FluidSimulationSaveState_get_diffuse_particle_types
pb.init_lib_func(libfunc,
[c_void_p, c_int, c_int, c_void_p, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), startidx, endidx, out])
types = [0]*n
for i in range(n):
types[i] = ord(out[i])
return types
@_check_load_state_initialized
def get_solid_cells(self, startidx = None, endidx = None):
ncells = self.get_num_solid_cells()
startidx, endidx = self._check_range(startidx, endidx, 0, ncells)
n = endidx - startidx
out = (GridIndex_t * n)()
libfunc = lib.FluidSimulationSaveState_get_solid_cells
pb.init_lib_func(libfunc,
[c_void_p, c_int, c_int, c_void_p, c_void_p], None)
pb.execute_lib_func(libfunc, [self(), startidx, endidx, out])
return out
@_check_load_state_initialized
def is_fluid_brick_grid_enabled(self):
libfunc = lib.FluidSimulationSaveState_is_fluid_brick_grid_enabled
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_int)
return bool(pb.execute_lib_func(libfunc, [self()]))
def is_load_state_initialized(self):
libfunc = lib.FluidSimulationSaveState_is_load_state_initialized
pb.init_lib_func(libfunc, [c_void_p, c_void_p], c_int)
return bool(pb.execute_lib_func(libfunc, [self()]))
def _check_range(self, startidx, endidx, minidx, maxidx):
if startidx is None:
startidx = minidx
if endidx is None:
endidx = maxidx
if not isinstance(startidx, int) or not isinstance(endidx, int):
raise TypeError("Index range must be integers")
if startidx < minidx:
raise IndexError("startidx out of range: " + str(startidx))
if endidx > maxidx:
raise IndexError("endidx out of range: " + str(endidx))
if endidx < startidx:
endidx = startidx
return startidx, endidx
def _check_load_state(self):
if not self.is_load_state_initialized():
raise RuntimeError("Savestate must be loaded to use this method.")
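# Hypothetical usage sketch, assuming the enclosing wrapper class is named
# FluidSimulationSaveState (as the C library function prefixes suggest):
#
#   state = FluidSimulationSaveState()
#   if state.load_state("autosave.state"):
#       print(state.get_grid_dimensions(), state.get_num_marker_particles())
#       positions = state.get_marker_particle_positions()
#       state.close_state()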
|
[
"[email protected]"
] | |
b4ae9f1575266b930744a7ac2eb33ce2b5e6389f
|
8dcc00b1d3843e5d0734b1978c301f424b4a7019
|
/optimize.py
|
7c00dd3260feab7443e46c1ff9b2aba3a3e5ee94
|
[] |
no_license
|
rezanmz/GNN-NAS
|
67804bd22bb3a8456e7542e183b1adc3f07777f3
|
ddde87dbf28f2b6a35c14945bed7e872a4d102e3
|
refs/heads/main
| 2023-08-30T12:19:34.238812 | 2021-09-20T05:25:35 | 2021-09-20T05:25:35 | 408,321,092 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,503 |
py
|
import matplotlib.pyplot as plt
from train import evaluate, train
from model import GraphSAGEModel
from utils import coarsen_graph, load_dataset
import optuna
import torch
from tqdm import tqdm
optuna.logging.set_verbosity(optuna.logging.WARNING)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
# COARSENING_RATIO = []
COARSENING_RATIO = [0.9, 0.6, 0.3]
# COARSENING_RATIO = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
# COARSENING_RATIO = [0.5]
DATASET = 'CiteSeer'
PERCENT_AVAILABLE_TRAINING_DATA = 5
# Choices: ['variation_neighborhoods', 'variation_edges', 'variation_cliques', 'heavy_edge', 'algebraic_JC', 'affinity_GS', 'kron']
COARSENING_METHOD = 'kron'
# Matplotlib stuff
plt.ion()
class DynamicUpdate():
# Suppose we know the x range
min_x = 0
max_x = (len(COARSENING_RATIO) + 1) * 100
def on_launch(self):
# Set up plot
self.figure, self.ax = plt.subplots()
self.lines, = self.ax.plot([], [], 'o', markersize=2)
self.ax.set_xlabel('Iterations')
self.ax.set_ylabel('Accuracy')
self.ax.set_title(
f'Fast Optuna Optimization on {DATASET}')
# self.ax.set_title(
# f'Fast Optuna Optimization using Hierarchical View {DATASET}')
# Autoscale on unknown axis and known lims on the other
self.ax.set_autoscaley_on(True)
# self.ax.set_xlim(self.min_x, self.max_x)
# Other stuff
self.ax.grid()
...
def on_running(self, xdata, ydata):
# Update data (with the new _and_ the old points)
self.lines.set_xdata(xdata)
self.lines.set_ydata(ydata)
# Need both of these in order to rescale
self.ax.relim()
self.ax.autoscale_view()
# We need to draw *and* flush
self.figure.canvas.draw()
self.figure.canvas.flush_events()
# Example
# def __call__(self):
# import numpy as np
# import time
# self.on_launch()
# xdata = []
# ydata = []
# for x in np.arange(0, 10, 0.5):
# xdata.append(x)
# ydata.append(np.exp(-x**2)+10*np.exp(-(x-7)**2))
# self.on_running(xdata, ydata)
# time.sleep(1)
# return xdata, ydata
d = DynamicUpdate()
# Pre-Processing
data, num_classes = load_dataset(DATASET, PERCENT_AVAILABLE_TRAINING_DATA)
x, labels, edge_index, train_mask, validation_mask = data.x, data.y, data.edge_index, data.train_mask, data.val_mask
x = x.to(device)
labels = labels.to(device)
edge_index = edge_index.to(device)
train_mask = train_mask.to(device)
validation_mask = validation_mask.to(device)
coarsened_graphs = []
for ratio in tqdm(COARSENING_RATIO, total=len(COARSENING_RATIO), desc='Generating Coarsened Graphs'):
result = coarsen_graph(data, ratio, COARSENING_METHOD)
# for i in range(len(result)):
# result[i] = result[i].to(device)
coarsened_graphs.append({
'ratio': ratio,
'coarsen_x': result[0].to(device),
'coarsen_train_labels': result[1].to(device),
'coarsen_train_mask': result[2].to(device),
# 'coarsen_val_labels': result[3],
# 'coarsen_val_mask': result[4],
'coarsen_edge': result[5].to(device),
})
coarsened_graphs.append({
'ratio': 0,
'coarsen_x': x,
'coarsen_train_labels': labels,
'coarsen_train_mask': train_mask,
# 'coarsen_val_labels': labels,
# 'coarsen_val_mask': validation_mask,
'coarsen_edge': edge_index,
})
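# coarsened_graphs now runs from the coarsest graph (ratio 0.9) down to the
# original graph (appended here with ratio 0): the single Optuna study below
# is run on the cheap coarse graphs first, so trials on the full graph start
# from hyperparameters that already worked at coarser resolutions.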
coarsen_x = None
coarsen_train_labels = None
coarsen_train_mask = None
coarsen_edge = None
accuracies = []
def objective(trial):
# n_layers = trial.suggest_int('n_layers', 1, 5)
n_layers = 5
layers = []
for l in range(n_layers):
layers.append({
'output_dim': trial.suggest_int(f'l{l}_output_dim', 1, 200) if l != (n_layers - 1) else num_classes,
'normalize': trial.suggest_categorical(f'l{l}_normalize', [True, False]),
'root_weight': trial.suggest_categorical(f'l{l}_root_weight', [True, False]),
'bias': trial.suggest_categorical(f'l{l}_bias', [True, False]),
'aggr': trial.suggest_categorical(f'l{l}_aggr', ['add', 'mean', 'max']),
'activation': trial.suggest_categorical(f'l{l}_activation', ['sigmoid', 'elu', 'relu', 'softmax', 'tanh', 'softplus', 'leaky_relu', 'relu6', None]),
'dropout': trial.suggest_float(f'l{l}_dropout', 0.0, 1.0),
})
model = GraphSAGEModel(layers, x.shape[1]).to(device)
optimizer = torch.optim.RMSprop(model.parameters())
for _ in range(50):
train(coarsen_x, coarsen_edge, coarsen_train_labels,
model, optimizer, coarsen_train_mask)
accuracies.append(evaluate(x, edge_index, labels, model, validation_mask))
d.on_running(range(len(accuracies)), accuracies)
return accuracies[-1]
study = optuna.create_study(direction='maximize')
COARSENING_RATIO.append(0)
d.on_launch()
for c in COARSENING_RATIO:
graph = None
for coarsened_graph in coarsened_graphs:
if coarsened_graph['ratio'] == c:
graph = coarsened_graph
break
coarsen_x = graph['coarsen_x']
coarsen_train_labels = graph['coarsen_train_labels']
coarsen_train_mask = graph['coarsen_train_mask']
coarsen_edge = graph['coarsen_edge']
print('Graph Size:', coarsen_x.shape[0])
study.optimize(objective, n_trials=50, show_progress_bar=True)
input()
|
[
"[email protected]"
] | |
834f9fe5415648747f7cdd025b3293da95197ed1
|
a567d8a737ce56f00c73938fbdd96ed2c6133522
|
/playlistlive/player/migrations/0002_auto_20200422_0046.py
|
9f1db51e331eb0fb90483535553846316f131760
|
[] |
no_license
|
irisswang/djrooms
|
edd30fe4ec5b6b33684688e8bde2b7854989916c
|
58e88c88870af10564a56408ef68439aab425318
|
refs/heads/master
| 2022-12-03T13:14:52.371720 | 2020-08-21T03:50:09 | 2020-08-21T03:50:09 | 289,170,931 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 534 |
py
|
# Generated by Django 3.0.5 on 2020-04-22 00:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('player', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='room',
name='label',
field=models.SlugField(null=True, unique=True),
),
migrations.AlterField(
model_name='room',
name='time_stamp',
field=models.IntegerField(null=True),
),
]
|
[
"[email protected]"
] | |
17b5eaa33969590f4387243407b4798aca232038
|
061e0fba7861dcc3e4be40bbba7267e42c44d182
|
/Task1.py
|
6bd52cfc8c35af8d946e223a083980900784b4e1
|
[] |
no_license
|
Jimmyqlx/EEGdataClassification
|
a019a572b5c01a30e5598ff2f168fb9f08f6e506
|
2e7bc82fab06b0db0b82b3c302c49083302ca089
|
refs/heads/master
| 2021-01-03T02:00:13.068942 | 2020-02-11T21:36:53 | 2020-02-11T21:36:53 | 239,869,229 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,115 |
py
|
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout, Masking, Embedding
from keras.layers import Input,Dense, Conv2D, Flatten, MaxPooling2D, Dropout
import numpy as np
import matplotlib.pyplot as plt
import os
import scipy.io as sio
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
dir = 'D:\\PycharmProjects\\ces734_final_project\\data'
total_fold = ['1','2','3','4','5','6','7','8','9','10']
result=[[],[]]
def read_mat(fold):
#Features=[]
#Labels=[]
#Features=np.array()
features_mat = sio.loadmat('{}/Feature{}.mat'.format(dir,fold[0]))
features = features_mat['Feature{}'.format(fold[0])]
features=np.transpose(features)
labels_mat = sio.loadmat('{}/Y{}.mat'.format(dir,fold[0]))
labels=labels_mat['Y{}'.format(fold[0])]
labels=labels[0]
#Labels.append(labels)
for i in range(1,len(fold)):
f_mat = sio.loadmat('{}/Feature{}.mat'.format(dir,fold[i]))
f = f_mat['Feature{}'.format(fold[i])]
f=np.transpose(f)
features = np.concatenate((features,f))
#Features.append(f)
l_mat = sio.loadmat('{}/Y{}.mat'.format(dir,fold[i]))
l = l_mat['Y{}'.format(fold[i])]
l=l[0]
labels = np.concatenate([labels,l])
#Labels.append(labels)
#Features = np.array(Features)
#Labels = np.array(Labels)
return features,labels
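# Leave-one-subject-out cross-validation: iteration i trains on the other
# nine subjects and tests on subject i+1.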
for i in range(0,len(total_fold)):
total_temp=total_fold.copy()
#print(i)
#print(len(total_temp))
del total_temp[i]
#print(len(total_fold))
#print(len(total_temp))
train_fold=total_temp
print(train_fold)
test_fold=[]
test_fold.append(total_fold[i])
print(test_fold)
train_x, train_y = read_mat(train_fold)
test_x, test_y = read_mat(test_fold)
train_x = np.reshape(train_x, (train_x.shape[0],train_x.shape[1],1))
test_x = np.reshape(test_x, (test_x.shape[0],test_x.shape[1],1))
#train_y = np.reshape(train_y, (train_x.shape[0], 1))
#test_y = np.reshape(test_y, (test_x.shape[0], 1))
#train_y=np.reshape(train_y, (train_y.shape[0],1))
#test_y=np.reshape(test_y, (test_y.shape[0],1))
#print(train_x.shape)
model = Sequential()
model.add(LSTM(75, input_shape=(320,1)))
#model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(train_x, train_y,epochs=1,validation_data=(test_x,test_y),verbose=0)
result[0].append(history.history['acc'][0])
result[1].append(history.history['val_acc'][0])
print('Leave Subject{} Out'.format(i))
print('Train Accuracy:{}'.format(history.history['acc'][0]))
print('Test Accuracy:{}'.format(history.history['val_acc'][0]))
print('Train Accuracy:{}'.format(result[0]))
print('Test Accuracy:{}'.format(result[1]))
avtrain = sum(result[0]) / len(result[0])
avtest = sum(result[1]) / len(result[1])
print('Average Train Accuracy:{}'.format(avtrain))
print('Average Test Accuracy:{}'.format(avtest))
print(result)
|
[
"[email protected]"
] | |
d0abf9db2f89e8b73654b4215f62188526f0ef67
|
a5cfc11819dcacc3629aced6bcbe324d694fd3b7
|
/CS-UY 1134/HW/HW9/sx670_hw9_q4.py
|
28a232af7dfd6e1453d2ddc0617a0b3642df47da
|
[] |
no_license
|
Kevinxu99/NYU-Coursework
|
712d26c808a8d9da94c889cac2cb594881a28c30
|
3b6a896a18e4ee2748fb8cdfdbfd1f6c4c2d212b
|
refs/heads/master
| 2022-04-21T20:43:07.205988 | 2020-04-11T10:39:47 | 2020-04-11T10:39:47 | 254,841,491 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,038 |
py
|
import random
class Empty(Exception):
pass
class UnsortedArrayMap:
class Item:
def __init__(self, key, value=None):
self.key = key
self.value = value
def __init__(self):
self.table = []
def __len__(self):
return len(self.table)
def is_empty(self):
return (len(self) == 0)
def __getitem__(self, key):
for item in self.table:
if key == item.key:
return item.value
raise KeyError("Key Error: " + str(key))
def __setitem__(self, key, value):
for item in self.table:
if key == item.key:
item.value = value
return
self.table.append(UnsortedArrayMap.Item(key, value))
def __delitem__(self, key):
for j in range(len(self.table)):
if key == self.table[j].key:
self.table.pop(j)
return
raise KeyError("Key Error: " + str(key))
def __iter__(self):
for item in self.table:
yield item.key
class DoublyLinkedList:
class Node:
def __init__(self, data=None, next=None, prev=None):
self.data = data
self.next = next
self.prev = prev
def disconnect(self):
self.data = None
self.next = None
self.prev = None
def __init__(self):
self.header = DoublyLinkedList.Node()
self.trailer = DoublyLinkedList.Node()
self.header.next = self.trailer
self.trailer.prev = self.header
self.size = 0
def __len__(self):
return self.size
def is_empty(self):
return (len(self) == 0)
def first_node(self):
if (self.is_empty()):
raise Empty("List is empty")
return self.header.next
def last_node(self):
if (self.is_empty()):
raise Empty("List is empty")
return self.trailer.prev
def add_first(self, elem):
return self.add_after(self.header, elem)
def add_last(self, elem):
return self.add_after(self.trailer.prev, elem)
def add_after(self, node, elem):
prev = node
succ = node.next
new_node = DoublyLinkedList.Node()
new_node.data = elem
new_node.prev = prev
new_node.next = succ
prev.next = new_node
succ.prev = new_node
self.size += 1
return new_node
def add_before(self, node, elem):
return self.add_after(node.prev, elem)
def delete(self, node):
prev = node.prev
succ = node.next
prev.next = succ
succ.prev = prev
self.size -= 1
data = node.data
node.disconnect()
return data
def __iter__(self):
if(self.is_empty()):
return
cursor = self.first_node()
while(cursor is not self.trailer):
yield cursor.data
cursor = cursor.next
def __str__(self):
return '[' + '<-->'.join([str(elem) for elem in self]) + ']'
def __repr__(self):
return str(self)
class ChainingHashTableMap:
def __init__(self, N=64, p=40206835204840513073):
self.N = N
self.table = [None] * self.N
self.dblst = DoublyLinkedList()
self.n = 0
self.p = p
self.a = random.randrange(1, self.p - 1)
self.b = random.randrange(0, self.p - 1)
def hash_function(self, k):
return ((self.a * hash(k) + self.b) % self.p) % self.N
def __len__(self):
return self.n
def __getitem__(self, key):
i = self.hash_function(key)
curr_bucket = self.table[i]
if curr_bucket is None:
raise KeyError("Key Error: " + str(key))
return curr_bucket[key].data
def __setitem__(self, key, value):
i = self.hash_function(key)
if self.table[i] is None:
self.table[i] = UnsortedArrayMap()
old_size = len(self.table[i])
self.dblst.add_last((key,value))
self.table[i][key] = self.dblst.last_node()
new_size = len(self.table[i])
if (new_size > old_size):
self.n += 1
if (self.n > self.N):
self.rehash(2 * self.N)
def __delitem__(self, key):
i = self.hash_function(key)
curr_bucket = self.table[i]
if curr_bucket is None:
raise KeyError("Key Error: " + str(key))
self.dblst.delete(curr_bucket[key])
del curr_bucket[key]
self.n -= 1
if (curr_bucket.is_empty()):
self.table[i] = None
if (self.n < self.N // 4):
self.rehash(self.N // 2)
def __iter__(self):
for key in self.dblst:
yield key[0]
def rehash(self, new_size):
old = []
for key in self:
value = self[key]
old.append((key, value))
self.table = [None] * new_size
self.n = 0
self.N = new_size
for (key, value) in old:
self[key] = value
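# Minimal usage sketch (not part of the original assignment): the auxiliary
# doubly linked list preserves insertion order, so iteration yields keys in
# the order they were first inserted.
#
#   m = ChainingHashTableMap()
#   m["a"] = 1
#   m["b"] = 2
#   del m["a"]
#   print(list(m))   # ['b']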
|
[
"[email protected]"
] | |
862323cdd250fded22470d58b5b961390e8c4680
|
88748ec85d537e4b50ba45a255a0dcc3c154116f
|
/tests/unit/test_poll.py
|
2810d41f352a9741f36504ab9e9b2f71976b5c96
|
[
"MIT"
] |
permissive
|
byrgazov/vanilla
|
17c53843b1b2f6b5484e4ff8e2fab54123245cc0
|
2896ae049d9e58ef3b4008a869ebf481951d0780
|
refs/heads/master
| 2020-07-30T12:30:04.497223 | 2020-07-24T10:08:03 | 2020-07-24T10:08:03 | 210,235,284 | 0 | 0 |
MIT
| 2019-09-23T00:49:06 | 2019-09-23T00:49:06 | null |
UTF-8
|
Python
| false | false | 1,731 |
py
|
import os
import vanilla.poll
class TestPoll(object):
def test_poll(self):
poll = vanilla.poll.Poll()
r, w = os.pipe()
poll.register(r, vanilla.poll.POLLIN)
assert poll.poll(timeout=0) == []
os.write(w, '1')
assert poll.poll() == [(r, vanilla.poll.POLLIN)]
# test event is cleared
assert poll.poll(timeout=0) == []
# test event is reset on new write after read
assert os.read(r, 4096) == '1'
assert poll.poll(timeout=0) == []
os.write(w, '2')
assert poll.poll() == [(r, vanilla.poll.POLLIN)]
assert poll.poll(timeout=0) == []
# test event is reset on new write without read
os.write(w, '3')
assert poll.poll() == [(r, vanilla.poll.POLLIN)]
assert poll.poll(timeout=0) == []
assert os.read(r, 4096) == '23'
def test_write_close(self):
poll = vanilla.poll.Poll()
r, w = os.pipe()
poll.register(r, vanilla.poll.POLLIN)
poll.register(w, vanilla.poll.POLLOUT)
assert poll.poll() == [(w, vanilla.poll.POLLOUT)]
assert poll.poll(timeout=0) == []
os.close(w)
assert poll.poll() == [(r, vanilla.poll.POLLERR)]
assert poll.poll(timeout=0) == []
def test_read_close(self):
poll = vanilla.poll.Poll()
r, w = os.pipe()
poll.register(r, vanilla.poll.POLLIN)
poll.register(w, vanilla.poll.POLLOUT)
assert poll.poll() == [(w, vanilla.poll.POLLOUT)]
assert poll.poll(timeout=0) == []
os.close(r)
got = poll.poll()
assert got == [(w, vanilla.poll.POLLOUT), (w, vanilla.poll.POLLERR)]
assert poll.poll(timeout=0) == []
|
[
"[email protected]"
] | |
9186adbbcc93b2ca3c5ce9855e19456286ca235c
|
6c95964cdbca57ee0e68c0cbfe6cf98380222ac6
|
/utils_pre/cv_show.py
|
6a8a1fd40dded05f300897d7a8f04c99ad1f0613
|
[] |
no_license
|
ustcylw/MXTrainV1
|
b9a72492e3a04bdecf6ef1a76fbf4af81c923ff3
|
a36f56c53e6782f32628572297e63fdb13ddf52d
|
refs/heads/master
| 2023-06-24T17:56:33.169487 | 2021-07-29T07:06:45 | 2021-07-29T07:06:45 | 289,848,695 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,643 |
py
|
#! /usr/bin/env python
# coding: utf-8
import os, sys
import numpy as np
import cv2 as cv
import utils.utils as Utils
import mxnet as mx
def cv_show_image(image, wait_time=0, RGB2BGR=True, name='image'):
if RGB2BGR:
image = cv.cvtColor(image, cv.COLOR_RGB2BGR)
cv.imshow(name, cv.UMat(image))
if cv.waitKey(wait_time) == ord('q'):
sys.exit(0)
def cv_show_images(images, wait_time=0, RGB2BGR=True, name='images'):
for image in images:
        cv_show_image(image, wait_time=wait_time, RGB2BGR=RGB2BGR, name=name)
def cv_show_batch_images(batch_images, wait_time=0, RGB2BGR=True, name='images'):
for i in range(batch_images.shape[0]):
image = batch_images[i, :, :, :]
image = image.transpose((1, 2, 0))
        cv_show_image(image, wait_time=wait_time, RGB2BGR=RGB2BGR, name=name)
def cv_draw_bbox(image, bbox_xyxy, color=(255, 0, 0)):
return cv.rectangle(cv.UMat(image), (bbox_xyxy[0][0], bbox_xyxy[0][1]), (bbox_xyxy[1][0], bbox_xyxy[1][1]), color)
def cv_draw_points(image, points, color=(0, 0, 255), radius=1):
for point in points:
image = cv.circle(cv.UMat(image), center=(Utils.ToInt(point[0]), Utils.ToInt(point[1])), color=color, radius=radius)
return image
def cv_draw_batch_points(batch_images, batch_points, normalized=True, radius=1, color=(0, 0, 255)):
'''
:param batch_images: numpy.array, [N, C, H, W]
:param batch_points: numpy.array, [N, (x1, y1, x2, y2, ...)]
:param normalized: image transform
:param radius:
:param color:
:return:
'''
images = []
for i in range(batch_images.shape[0]):
image = batch_images[i, :, :, :]
image = image.transpose((1, 2, 0))
keypoints = batch_points[i, :].reshape((-1, 2))
if normalized:
image = image * 128.0 + 127.5
image = image.astype(np.uint8)
image = cv_draw_points(image, keypoints, color=color, radius=radius)
images.append(image)
return images
def cv_show_lm_rets(datas, predi, labeli):
if isinstance(datas, mx.nd.NDArray):
datas = datas.as_in_context(mx.cpu()).asnumpy()
if isinstance(predi, mx.nd.NDArray):
predi = predi.as_in_context(mx.cpu()).asnumpy()
if isinstance(labeli, mx.nd.NDArray):
labeli = labeli.as_in_context(mx.cpu()).asnumpy()
# cv_show_batch_images(datas, wait_time=300)
images = cv_draw_batch_points(datas, predi * 128.0, color=(255, 0, 0))
images = np.stack([image.get().transpose((2, 0, 1)) for image in images], axis=0)
images = cv_draw_batch_points(images, labeli, normalized=False, color=(0, 0, 255))
cv_show_images(images, wait_time=300)
|
[
"[email protected]"
] | |
3fd87ba9660551134d9c18266bba46a5294863d3
|
5ade54823f83b2c20b00ea46d1e3bc09846e1215
|
/datacube_zarr/utils/convert.py
|
b6218ecb7d207254be313c7f35f4787919f7ccc7
|
[
"Apache-2.0"
] |
permissive
|
opendatacube/datacube-zarr
|
5bab8aff0872fa428cd55a7e875b69be78172b6c
|
1c05a0c92d81b4235d75bbb01adcb2da9ae8013b
|
refs/heads/master
| 2023-06-08T03:50:18.316962 | 2021-06-30T04:49:31 | 2021-06-30T04:49:31 | 381,565,708 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,207 |
py
|
"""
Functions for converting datasets to zarr format.
Conversions are supported on a local filesystem or S3
"""
import logging
from os.path import commonprefix
from pathlib import Path
from typing import Any, Iterator, List, Optional, Tuple
from datacube_zarr.utils.raster import raster_to_zarr
_SUPPORTED_FORMATS = {
"ENVI": (".img/.hdr", ".bip/.hdr", ".bil/.hdr", ".bip/.hdr"),
"ERS": (".ers/.ers.aux.xml/",),
"GeoTiff": (".tif", ".tiff", ".gtif"),
"HDF": (".hdf", ".h5"),
"JPEG2000": (".jp2",),
"NetCDF": (".nc",),
}
_RASTERIO_FORMATS = (
"ENVI",
"ERS",
"GeoTiff",
"HDF",
"JPEG2000",
"NetCDF",
)
_RASTERIO_FILES = [
x.split("/")[0] for f in _RASTERIO_FORMATS for x in _SUPPORTED_FORMATS[f]
]
logger = logging.getLogger(__name__)
def _root_as_str(path: Path) -> str:
"""uri path to str."""
return path.as_uri() if path.as_uri().startswith("s3://") else str(path)
def ignore_file(path: Path, patterns: Optional[List[str]]) -> bool:
"""Check if path matches ignore patterns.
:param path: path to compar with ignore pattern
:param patterns: list of glob patterns specifying which paths to ignore
:return True if path is to be ignored
"""
return any(path.match(p) for p in patterns) if patterns else False
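# e.g. ignore_file(Path("scene/aux.tif"), ["*.tif"]) is True, while
# ignore_file(Path("scene/aux.tif"), None) is False.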
def get_datasets(in_dir: Path) -> Iterator[Tuple[str, List[Path]]]:
"""
Find supported datasets within a directory.
:param in_dir: directory (or S3 path) under-which to look for datasets
:return: iterator of datasets specified by type and file paths
"""
for fmt, filetypes in _SUPPORTED_FORMATS.items():
for exts in [ft.split("/") for ft in filetypes]:
data_ext = exts.pop(0)
for datafile in in_dir.glob(f"*{data_ext}"):
others = [datafile.with_suffix(e) for e in exts]
if all(o.exists() for o in others):
yield fmt, [datafile] + others
def convert_dir(
in_dir: Path,
out_dir: Optional[Path] = None,
ignore: Optional[List[str]] = None,
merge_datasets_per_dir: bool = False,
**zarrgs: Any,
) -> List[str]:
"""
Recursively convert datasets in a directory to Zarr format.
All supported datasets found underneath `in_dir` are (optionally) reprojected and
converted to zarr format. All other files are copied to the `out_dir` unless ignored.
If `out_dir` is not specfied the conversion is performed inplace and the original
raster files are removed.
:param in_dir: directory (or S3 path) under-which to convert rasters to zarr
:param out_dir: directory (or S3 path) to save converted datasets
:param ignore: list of glob patterns specifying files to ignore
:param merge_datasets_per_dir: option to merge all tifs found at a directory level
:param zarrgs: keyword arguments to pass to conversion function and zarr_io
"""
assert in_dir.is_dir()
output_zarrs = []
# find and convert datasets
datasets = [f for t, f in get_datasets(in_dir) if not ignore_file(f[0], ignore)]
converted_files = []
if datasets:
zarr_name = None
if merge_datasets_per_dir:
zarr_name = commonprefix([f[0].stem for f in datasets]) or in_dir.name
for files in datasets:
zarrs = convert_to_zarr(files, out_dir, zarr_name, **zarrgs)
output_zarrs.extend(zarrs)
converted_files.extend(files)
ignore_patterns = (ignore or []) + [str(f) for f in converted_files]
# recurse into directories (and copy other files)
for p in in_dir.iterdir():
if p.relative_to(in_dir).name and not ignore_file(p, ignore_patterns):
out_p = out_dir / p.name if out_dir else None
if p.is_dir():
zarrs = convert_dir(p, out_p, ignore, merge_datasets_per_dir, **zarrgs)
output_zarrs.extend(zarrs)
elif out_p is not None:
if out_p.as_uri().startswith("file://") and not out_p.parent.exists():
out_p.parent.mkdir(exist_ok=True, parents=True)
out_p.write_bytes(p.read_bytes())
return output_zarrs
def convert_to_zarr(
files: List[Path],
out_dir: Optional[Path] = None,
zarr_name: Optional[str] = None,
**zarrgs: Any,
) -> List[str]:
"""
Convert a supported dataset to Zarr format.
:param files: list of file making up the dataset (local filesystem or S3)
:param out_dir: output directory (local filesystem or S3)
:param zarr_name: name to give the created `.zarr` dataset
:param zarrgs: keyword arguments to pass to conversion function and zarr_io
:return: list of generated zarr URIs
"""
data_file = files[0]
inplace = out_dir is None
if out_dir is None:
out_dir = data_file.parent
if data_file.suffix in _RASTERIO_FILES:
zarrs = raster_to_zarr(data_file, out_dir, zarr_name, **zarrgs)
else:
raise ValueError(f"Unsupported data file format: {data_file.suffix}")
# if converting inplace, remove the original file
if inplace:
for f in files:
f.unlink()
logger.info(f"delete: {_root_as_str(f)}")
return zarrs
|
[
"[email protected]"
] | |
06b51ef2e0ace62f85db6ec90b93c94fa5739842
|
abfd2171bad6d822fdb59b9178d391885266492e
|
/TGNN_2T/util/data.py
|
612fb0ceb666c432cc598479f9c06ed2dcbfc07a
|
[] |
no_license
|
KRICT-DATA/TGNN_Band_gap
|
c26971fadfd0bf876f8440011696ec58afecc994
|
a95187fb40e544e2ee49e80de39a73dabf8e9a05
|
refs/heads/master
| 2023-01-23T18:56:59.864663 | 2020-12-04T01:34:57 | 2020-12-04T01:34:57 | 276,809,991 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 171 |
py
|
class AtomwiseCrystal:
def __init__(self, pairs, y, ref_feat, id):
self.pairs = pairs
self.y = y
self.ref_feat = ref_feat
self.id = id
|
[
"[email protected]"
] | |
169b16e41128a5b926ccbc2ac8fca63fb6ec4f99
|
ebb2360cd4f6c06a67a41c701f742672163dd9e1
|
/day17/day17.py
|
8dbaa2ad0d3aee430de37f7ec4c5c52fe7992c92
|
[] |
no_license
|
sephirothx/AdventOfCode2020
|
7387cae1e0617be4310cffe0ac63c6d28d2cc15b
|
0aaccc7de82329744d484281d2cb36c8860710f6
|
refs/heads/master
| 2023-02-06T16:11:00.984293 | 2020-12-25T18:10:51 | 2020-12-25T18:10:51 | 317,433,761 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 928 |
py
|
from itertools import product
inp = open('input.txt').read().splitlines()
def solve(dim, cycles=6):
def neighbors(c, count_self=True):
for delta in product([-1,0,1], repeat=dim):
if not count_self and all(d==0 for d in delta):
continue
yield tuple(x+d for x,d in zip(c,delta))
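    # e.g. with dim=3, neighbors((0, 0, 0)) yields all 27 cells of the
    # surrounding 3x3x3 cube (26 cells when count_self=False).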
d = set()
for y, l in enumerate(inp):
for x, c in enumerate(l):
if c == '#':
d.add(tuple([x,y] + [0]*(dim-2)))
for _ in range(cycles):
s = set(n for c in d for n in neighbors(c))
new_d = set()
for c in s:
active = 0
for n in neighbors(c,False):
active += 1 if n in d else 0
if c in d and 2<=active<=3:
new_d.add(c)
elif c not in d and active == 3:
new_d.add(c)
d = new_d
return len(d)
print(solve(3))
print(solve(4))
|
[
"[email protected]"
] | |
d8e20f7f2f4ccbe12262661ca45a1a743d45b15e
|
a027d95404e5c8e63dc90a561ed595a58605ab72
|
/DAT310/Python/assignment 6/gradebook.py
|
7ee5ec53c7078dce3a1863d977d9f2156e342957
|
[] |
no_license
|
aribo17/repo
|
2d4e64cf8272780686eafa065db494de0cfdd035
|
b9f182d4a3a94c1a48ebf979889c5e832a732973
|
refs/heads/master
| 2022-11-30T11:28:45.831789 | 2018-04-30T19:11:54 | 2018-04-30T19:11:54 | 117,899,119 | 0 | 1 | null | 2022-11-25T15:56:39 | 2018-01-17T22:14:02 |
Python
|
UTF-8
|
Python
| false | false | 5,407 |
py
|
"""
Assignment 6B: Gradebook
"""
import os
HTML_FRAME_TOP = "<!DOCTYPE HTML>\n<html>\n<head>\n<title>{title}</title>\n" \
"<link rel=\"stylesheet\" href=\"{css_path}gradebook.css\"/>\n</head>\n<body>\n"
HTML_FRAME_BOTTOM = "</body>\n</html>\n"
class Gradebook(object):
def __init__(self):
        self.__students = {}   # student_no -> name
        self.__grades = {}     # student_no -> grade (last one read wins)
        self.__courses = {}    # course_code -> course name
        self.__semesters = {}  # semester -> course_code
def __create_folders(self):
"""Generates folder structure."""
print("Generating folder structure ... ")
for d in ["courses", "semesters", "students"]:
os.makedirs("output/" + d, exist_ok=True)
def __load_data(self):
"""Loads data from input tsv files."""
# Load students
print("Loading students.tsv ...")
with open("students.tsv", "r") as f:
for line in f:
student_no, name = line.strip().split("\t")
self.__students[student_no] = name
# Load courses
print("Loading courses.tsv ...")
with open("courses.tsv", "r") as f:
for line in f:
course_code, course_name = line.strip().split("\t")
self.__courses[course_code] = course_name
# Load grades
print("Loading grades.tsv ...")
with open("grades.tsv", "r") as f:
for line in f:
student_no, course_code, semester, grade = line.strip().split("\t")
self.__grades[student_no] = grade
                self.__semesters[semester] = course_code
def __generate_student_files(self):
"""Generates HTML files for students."""
pass
def __generate_course_files(self):
"""Generates HTML files for courses."""
print("Generating course file ...")
with open("output/course.html", "w") as f:
f.write(HTML_FRAME_TOP.replace("{title}", "Gradebook course").replace("{css_path}", "../"))
f.write("<h2>Course<h2>")
f.write("<table>\n<thead>\n<tr><th>Student no</th><th>Name</th></tr>\n</thead>\n<tbody>\n")
for student_no, name in sorted(self.__students.items()):
row = "<tr><td><a href=\"students/{student_no}.html\">{student_no}</a></td><td>{name}</td></tr>\n"
f.write(row.replace("{student_no}", student_no).replace("{name}", name))
f.write("</tbody>\n</table>\n")
def __generate_semester_files(self):
"""Generates HTML files for semesters."""
print("Generating semester file ...")
with open("output/semester.html", "w") as f:
f.write(HTML_FRAME_TOP.replace("{title}", "Gradebook Semester").replace("{css_path}", "../"))
f.write("<h2>Semester<h2>")
f.write("<table>\n<thead>\n<tr><th>Student no</th><th>Name</th></tr>\n</thead>\n<tbody>\n")
for student_no, name in sorted(self.__students.items()):
row = "<tr><td><a href=\"students/{student_no}.html\">{student_no}</a></td><td>{name}</td></tr>\n"
f.write(row.replace("{student_no}", student_no).replace("{name}", name))
f.write("</tbody>\n</table>\n")
def __generate_index_file(self):
"""Generates the index HTML file."""
print("Generating index file ...")
with open("output/index.html", "w") as f:
f.write(HTML_FRAME_TOP.replace("{title}", "Gradebook Index").replace("{css_path}", "../"))
# list of students
f.write("<h2>Students</h2>")
f.write("<table>\n<thead>\n<tr><th>Student no</th><th>Name</th></tr>\n</thead>\n<tbody>\n")
for student_no, name in sorted(self.__students.items()):
row = "<tr><td><a href=\"students/{student_no}.html\">{student_no}</a></td><td>{name}</td></tr>\n"
f.write(row.replace("{student_no}", student_no).replace("{name}", name))
f.write("</tbody>\n</table>\n")
# list of courses
f.write("<h2>Courses</h2>")
f.write("<table>\n<thead>\n<tr><th>Course code</th><th>Name</th></tr>\n</thead>\n<tbody>\n")
            for course_code, course_name in sorted(self.__courses.items()):
row = "<tr><td><a href=\"courses/{course_code}.html\">{course_code}</a></td><td>{course_name}</td></tr>\n"
f.write(row.replace("{course_code}", course_code).replace("{course_name", course_name))
f.write("</tbody>\n</table>\n")
# list of semesters
f.write("<h2>Semesters</h2>")
f.write("<table>\n<thead>\n<tr><th>Semester</th><th>Course code</th></tr>\n</thead>\n<tbody>\n")
            for semester, course_code in sorted(self.__semesters.items()):
row = "<tr><td><a href=\"semesters/{semester}.html\">{semester}</a></td><td>{course_code}</td></tr>\n"
f.write(row.replace("{semester}", semester).replace("{course_code", course_code))
f.write("</tbody>\n</table>\n")
f.write(HTML_FRAME_BOTTOM)
def generate_files(self):
self.__create_folders()
self.__load_data()
self.__generate_student_files()
self.__generate_course_files()
self.__generate_semester_files()
self.__generate_index_file()
def main():
gradebook = Gradebook()
gradebook.generate_files()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
46d146530af78700cfd74beb3f489023f1b7682d
|
f5b62ca8d62efb831001306f8b402bbe6f71a373
|
/catkin_ws/src/cmake-build-debug/localization/packages/orb_localizer/cmake/orb_localizer-genmsg-context.py
|
94ef4ef86877787d6aa903bf217007a6c30b96eb
|
[] |
no_license
|
Dokirobot-autonomous/localization_ros
|
0771fa47a37200467707a13b23f62ab8036dfb74
|
564b656637afb84aaa7fe6e069f379f71d61a7b8
|
refs/heads/master
| 2020-03-30T08:33:51.591463 | 2018-10-16T03:43:49 | 2018-10-16T03:43:49 | 151,024,870 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 649 |
py
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/ohashi/localization_ws/catkin_ws/src/localization/packages/orb_localizer/msg/debug.msg"
services_str = ""
pkg_name = "orb_localizer"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "orb_localizer;/home/ohashi/localization_ws/catkin_ws/src/localization/packages/orb_localizer/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
[
"[email protected]"
] | |
b540a1018ada187e4e6e105e8d050f936df3061b
|
f416ab3adfb5c641dc84022f918df43985c19a09
|
/problems/advent-of-code/2022/05/sol2.py
|
78cf7599b31d96f7b01fd8ad778ed956290eda79
|
[] |
no_license
|
NicoKNL/coding-problems
|
a4656e8423e8c7f54be1b9015a9502864f0b13a5
|
4c8c8d5da3cdf74aefcfad4e82066c4a4beb8c06
|
refs/heads/master
| 2023-07-26T02:00:35.834440 | 2023-07-11T22:47:13 | 2023-07-11T22:47:13 | 160,269,601 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,530 |
py
|
import sys
def splitInput(lines):
stack_data = []
moves = []
parsing_stack = True
for line in lines:
if not line:
parsing_stack = False
continue
if parsing_stack:
stack_data.append(line)
else:
moves.append(line)
stack_count = int(stack_data[-1].split()[-1])
return stack_count, stack_data[:-1], moves
def parseStacks(count, data):
stacks = [[] for _ in range(count)]
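    # Crate letters sit at string columns 1, 5, 9, ... (i.e. 1 + 4*i), e.g.
    # "[N] [C]" has 'N' at column 1 and 'C' at column 5.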
for row in data:
print(row)
for i, c in enumerate(range(1, len(row), 4)):
if row[c].strip():
stacks[i].append(row[c])
stacks = [stack[::-1] for stack in stacks]
return stacks
def parseMoves(moves):
for i in range(len(moves)):
words = moves[i].split()
move = [words[1], words[3], words[5]] # [count, from, to]
move = list(map(int, move))
move[1] -= 1 # Use 0 based indexing
move[2] -= 1
moves[i] = move
def execute(moves, stacks):
for (count, s, t) in moves:
stacks[t].extend(stacks[s][-count:])
stacks[s] = stacks[s][:-count]
if __name__ == "__main__":
lines = [l[:-1] for l in sys.stdin]
stack_count, stack_data, moves = splitInput(lines)
stacks = parseStacks(stack_count, stack_data)
parseMoves(moves)
execute(moves, stacks)
answer = [" " for _ in range(stack_count)]
for i, stack in enumerate(stacks):
if stack:
answer[i] = stack[-1]
print("".join(answer))
|
[
"[email protected]"
] | |
3d6f97b441256fac9b6cb66b3166f809ccf49ec4
|
23d952d7d52ce824fdd93f7f6cdb60c962846524
|
/3-4_Preparation+Computation/ingredients/scripts/similarity_basic.py
|
8ee5c8d3300d8de60c878e88403d73d332e40dd0
|
[] |
no_license
|
BigData-Team8/Italian-Cuisine
|
9152b3ec73f86081a3bbab9d7aa3078ca3648011
|
37146be0cf98d80d22400b8d2d94cd9e20159237
|
refs/heads/master
| 2022-12-05T11:01:27.199447 | 2020-08-19T21:13:55 | 2020-08-19T21:13:55 | 287,547,525 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,610 |
py
|
import json
import os, sys
import redis
import nltk
import pandas as pd
"""
from IPython.display import display
"""
pd.set_option('display.max_rows', 5000)
pd.set_option('display.max_columns', 5000)
pd.set_option('display.width', 10000)
# from __future__ import print_function
from nltk.metrics import *
csv = 'ingredients_freq-complete.csv'
df = pd.read_csv(csv, usecols = ['Ingredient', 'Freq', 'Freq_Cucchiaio', 'Freq_GZ', 'Freq_RR' ])
df = df.sort_values(by = ['Ingredient'], ascending = False)
result = {}
i = 0
for indexS, rowS in df.iterrows():
sourceIng = rowS['Ingredient']
sourceFreq = rowS['Freq']
if (sourceFreq == 1):
i += 1
# print(i, sourceIng, sourceFreq)
# for key in result:
# print(key, result[key])
for indexD, rowD in df.iterrows():
destIng = rowD['Ingredient']
destFreq = rowD['Freq']
if (sourceIng != destIng):
distance = edit_distance(sourceIng, destIng)
# https://stackoverflow.com/questions/45783385/normalizing-the-edit-distance
normalizedDistance = distance / max(len(sourceIng), len(destIng))
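            # e.g. edit_distance("pomodoro", "pomodori") = 1 with max length 8,
            # giving 0.125 < 0.15, so the two spellings are merged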
if (normalizedDistance < 0.15 ):
# in this case the frequency of the source ingredient is higher than the frequency of the destination one
if (sourceFreq > destFreq):
result[destIng] = sourceIng
elif (sourceFreq < destFreq):
result[sourceIng] = destIng
# equals
else:
result[destIng] = sourceIng
print(sourceIng, '(', sourceFreq, ') => ', destIng, '(', destFreq, ') | distance = ', normalizedDistance)
|
[
"[email protected]"
] | |
1e4c01ecb330a82e978502397ca4207a7ecf25fd
|
e3d030829e30e9357d9abcf369b89f23f9cb265d
|
/Lab 1 API Pam Fields 1-9-2018.py
|
e749e9ada200e3208a05c9a842564a18097659b1
|
[] |
no_license
|
pamsfields/PytonAPI
|
d382875df9ea5c12442d98a80a5b241e23c0215b
|
0139fcc58bd46f92301dbbcad70d91d42a2ce58b
|
refs/heads/master
| 2021-05-13T20:18:44.298069 | 2018-01-11T03:02:39 | 2018-01-11T03:02:39 | 116,910,294 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 472 |
py
|
import requests
import os
key = os.environ['fixer_key']
base_url = "https://fixer.io/latest?symbols="
currency = input('What is the first country to compare currency exchanges? Please use the three letter abbreviation ')
params = {'fixer_key', 't' : currency}
data = dict(dict(requests.get("https://api.fixer.io/2018-01-10").json()).get("rates")).get(currency)
print(data)
print("Current Exchange rate with Euro:")
print(data['rates'][0]['Value'])
|
[
"[email protected]"
] | |
8793d1216da14a67dcf3182d0cb2892449856995
|
7f601fdb97c992a96e93a9d02e3720d57e191fcf
|
/djintl/settings.py
|
98db185b64269d243e4d51d6d01b8fc24e89906c
|
[] |
no_license
|
cipang/djintl
|
796f9f95b65e82e24bf8c27dff16c24ef50a5bcb
|
27b28161e8fe9ceee76fb3167d9b72b44a8cfcbc
|
refs/heads/main
| 2023-09-01T12:44:18.115785 | 2021-11-06T02:24:49 | 2021-11-06T02:24:49 | 425,121,072 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,521 |
py
|
"""
Django settings for djintl project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
from django.utils.translation import gettext_lazy as _
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-1s)aaaaaaaaa&%x#8(*9q&9yy!p00!3=mn0*&m-cvd=aq1f$$d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mainapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djintl.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.i18n',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djintl.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LANGUAGE_CODE = 'en'
LANGUAGES = [
('en', _('English')),
('zh', _('Chinese'))
]
LOCALE_PATHS = [BASE_DIR / "locale"]
|
[
"[email protected]"
] | |
a31688d8579cfce253b6dac4f680333340f6b0e4
|
0bde5f7f09aa537ed1f4828d4e5ebee66475918f
|
/h2o-py/tests/testdir_sklearn/pyunit_sklearn_params.py
|
2a70a91baafd68393e95b43969166ffea1f8a2ea
|
[
"Apache-2.0"
] |
permissive
|
Winfredemalx54/h2o-3
|
d69f1c07e1f5d2540cb0ce5e6073415fa0780d32
|
dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7
|
refs/heads/master
| 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 |
Apache-2.0
| 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null |
UTF-8
|
Python
| false | false | 7,702 |
py
|
from __future__ import print_function
import os, sys
from sklearn.pipeline import Pipeline
from h2o.sklearn import H2OAutoMLEstimator, H2OGradientBoostingEstimator, H2OScaler, H2OPCA
sys.path.insert(1, os.path.join("..",".."))
from tests import pyunit_utils
seed = 2019
def test_all_params_are_visible_in_get_params():
pipeline = Pipeline([
('standardize', H2OScaler(center=True, scale=False)),
('pca', H2OPCA(k=2, seed=seed)),
('estimator', H2OGradientBoostingEstimator(ntrees=20, max_depth=5, seed=seed))
])
params = pipeline.get_params()
assert isinstance(params['standardize'], H2OScaler)
assert params['standardize__center'] is True
assert params['standardize__scale'] is False
assert isinstance(params['pca'], H2OPCA)
assert params['pca__k'] == 2
assert params['pca__seed'] == seed
assert isinstance(params['estimator'], H2OGradientBoostingEstimator)
assert params['estimator__ntrees'] == 20
assert params['estimator__max_depth'] == 5
assert params['estimator__seed'] == seed
# also the ones that were not set explicitly
assert params['pca__max_iterations'] is None
assert params['estimator__learn_rate'] is None
def test_all_params_can_be_set_using_set_params():
pipeline = Pipeline([
('standardize', H2OScaler()),
('pca', H2OPCA()),
('estimator', H2OGradientBoostingEstimator())
])
pipeline.set_params(
standardize__center=True,
standardize__scale=False,
pca__k=2,
pca__seed=seed,
estimator__ntrees=20,
estimator__max_depth=5,
estimator__seed=seed
)
assert isinstance(pipeline.named_steps.standardize, H2OScaler)
assert pipeline.named_steps.standardize.center is True
assert pipeline.named_steps.standardize.scale is False
assert isinstance(pipeline.named_steps.pca, H2OPCA)
assert pipeline.named_steps.pca.k == 2
assert pipeline.named_steps.pca.seed == seed
assert isinstance(pipeline.named_steps.estimator, H2OGradientBoostingEstimator)
assert pipeline.named_steps.estimator.ntrees == 20
assert pipeline.named_steps.estimator.max_depth == 5
assert pipeline.named_steps.estimator.seed == seed
def test_all_params_are_accessible_as_properties():
pipeline = Pipeline([
('standardize', H2OScaler(center=True, scale=False)),
('pca', H2OPCA(k=2, seed=seed)),
('estimator', H2OGradientBoostingEstimator(ntrees=20, max_depth=5, seed=seed))
])
assert isinstance(pipeline.named_steps.standardize, H2OScaler)
assert pipeline.named_steps.standardize.center is True
assert pipeline.named_steps.standardize.scale is False
assert isinstance(pipeline.named_steps.pca, H2OPCA)
assert pipeline.named_steps.pca.k == 2
assert pipeline.named_steps.pca.seed == seed
assert isinstance(pipeline.named_steps.estimator, H2OGradientBoostingEstimator)
assert pipeline.named_steps.estimator.ntrees == 20
assert pipeline.named_steps.estimator.max_depth == 5
assert pipeline.named_steps.estimator.seed == seed
# also the ones that were not set explicitly
assert pipeline.named_steps.pca.max_iterations is None
assert pipeline.named_steps.estimator.learn_rate is None
def test_all_params_can_be_set_as_properties():
pipeline = Pipeline([
('standardize', H2OScaler()),
('pca', H2OPCA()),
('estimator', H2OGradientBoostingEstimator())
])
pipeline.named_steps.standardize.center = True
pipeline.named_steps.standardize.scale = False
pipeline.named_steps.pca.k = 2
pipeline.named_steps.pca.seed = seed
pipeline.named_steps.estimator.ntrees = 20
pipeline.named_steps.estimator.max_depth = 5
pipeline.named_steps.estimator.seed = seed
params = pipeline.get_params()
assert isinstance(params['standardize'], H2OScaler)
assert params['standardize__center'] is True
assert params['standardize__scale'] is False
assert isinstance(params['pca'], H2OPCA)
assert params['pca__k'] == 2
assert params['pca__seed'] == seed
assert isinstance(params['estimator'], H2OGradientBoostingEstimator)
assert params['estimator__ntrees'] == 20
assert params['estimator__max_depth'] == 5
assert params['estimator__seed'] == seed
def test_params_conflicting_with_sklearn_api_are_still_available():
pca = H2OPCA()
assert pca.transform != 'NONE'
assert callable(pca.transform), "`transform` method from sklearn API has been replaced by a property"
# conflicting param can be accessed normally using get_params()
assert pca.get_params()['transform'] == 'NONE'
# property is accessible directly using a trailing underscore
assert pca.transform_ == 'NONE'
pca = H2OPCA(transform='DEMEAN')
assert callable(pca.transform), "`transform` method from sklearn API has been replaced by a property"
assert pca.get_params()['transform'] == 'DEMEAN'
assert pca.transform_ == 'DEMEAN'
# conflicting param can be modified normally using set_params()
pca.set_params(transform='DESCALE')
assert pca.get_params()['transform'] == 'DESCALE'
assert pca.transform_ == 'DESCALE'
# conflicting property can be set directly using a trailing underscore
pca.transform_ = 'NORMALIZE'
assert pca.get_params()['transform'] == 'NORMALIZE'
assert pca.transform_ == 'NORMALIZE'
def test_params_are_correctly_passed_to_underlying_transformer():
pca = H2OPCA(seed=seed)
pca.set_params(transform='DEMEAN', k=3)
pca.model_id = "dummy"
assert pca.estimator is None
pca._make_estimator() # normally done when calling `fit`
assert pca.estimator
parms = pca.estimator._parms
assert parms['seed'] == seed
assert parms['transform'] == 'DEMEAN'
assert parms['k'] == 3
assert parms['model_id'] == "dummy"
assert parms['max_iterations'] is None
def test_params_are_correctly_passed_to_underlying_estimator():
estimator = H2OGradientBoostingEstimator(seed=seed)
estimator.set_params(max_depth=10, learn_rate=0.5)
estimator.model_id = "dummy"
assert estimator.estimator is None
estimator._make_estimator() # normally done when calling `fit`
real_estimator = estimator.estimator
assert real_estimator
parms = real_estimator._parms
assert real_estimator.seed == parms['seed'] == seed
assert real_estimator.max_depth == parms['max_depth'] == 10
assert real_estimator.learn_rate == parms['learn_rate'] == 0.5
assert real_estimator._id == parms['model_id'] == "dummy"
assert real_estimator.training_frame == parms['training_frame'] is None
def test_params_are_correctly_passed_to_underlying_automl():
estimator = H2OAutoMLEstimator(seed=seed)
estimator.set_params(max_models=5, nfolds=0)
estimator.project_name = "dummy"
assert estimator.estimator is None
estimator._make_estimator() # normally done when calling `fit`
aml = estimator.estimator
assert aml
assert aml.build_control["stopping_criteria"]["seed"] == seed
assert aml.build_control["stopping_criteria"]["max_models"] == 5
assert aml.build_control["nfolds"] == 0
assert aml.build_control["project_name"] == "dummy"
pyunit_utils.run_tests([
test_all_params_are_visible_in_get_params,
test_all_params_can_be_set_using_set_params,
test_all_params_are_accessible_as_properties,
test_all_params_can_be_set_as_properties,
test_params_conflicting_with_sklearn_api_are_still_available,
test_params_are_correctly_passed_to_underlying_transformer,
test_params_are_correctly_passed_to_underlying_estimator,
test_params_are_correctly_passed_to_underlying_automl,
])
|
[
"[email protected]"
] | |
6699c77e6bdac3ab961e9ae3e1ad3f3c9aaf04a2
|
7112e0e4b4b63228b9e0b4f0931a1870ffe54728
|
/src/tests/gate-basic-bionic-stein
|
6e24087e8b31a39c4f9cfc77d2120a4aedbc31ce
|
[
"Apache-2.0"
] |
permissive
|
jlamendo/charm-designate
|
d45017a0ef904276d24eff9e84de38e9324b156c
|
32c71708df04c02ea5fab335246344e03c39f70f
|
refs/heads/master
| 2020-06-18T02:12:54.123654 | 2019-04-19T19:27:49 | 2019-04-19T19:27:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 917 |
#!/usr/bin/env python
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Amulet tests on a basic aodh deployment on bionic-stein."""
from basic_deployment import DesignateBasicDeployment
if __name__ == '__main__':
deployment = DesignateBasicDeployment(
series='bionic',
openstack='cloud:bionic-stein',
source='cloud:bionic-stein')
deployment.run_tests()
|
[
"[email protected]"
] | ||
e701b146176a8058895966374a94283d46d3e007
|
70fc834daab609e40dc1d6f67cef7afc525b0ace
|
/main5.py
|
4a91c903c9ad310e98536ff11feb0a256a1b6a07
|
[] |
no_license
|
Edvard-Hagerup-Grieg/UNN-AppMath
|
f8c78921e5ae2392ea0d9d8c1f170beacd6b39f4
|
fca3f7e79a98741c9c910895c47141909b6062e6
|
refs/heads/master
| 2020-04-26T09:16:12.772794 | 2019-04-15T06:14:29 | 2019-04-15T06:14:29 | 173,449,702 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,261 |
py
|
import matplotlib.pyplot as plt
import numpy as np
import math
def f(x, r):
return r*x*(1 - x)
def df(x, r):
return r - 2*r*x
if __name__ == "__main__":
# LAMEREY DIAGRAM & SYSTEM EVALUTION
RUN = True
if RUN:
x0 = 0.4
r = 3.46
xn = [x0]
x = [x0]
y = [0]
for i in range(500):
x1 = f(x0, r)
x.append(x0)
x.append(x1)
y.append(x1)
y.append(x1)
xn.append(x1)
x0 = x1
plt.figure(figsize=(10,4))
plt.subplot(1, 2, 1)
plt.plot(range(100), xn[:100], color='black', linewidth=0.7)
plt.title('SYSTEM EVALUTION')
plt.xlabel('n')
plt.ylabel('x(n)')
plt.subplot(1,2,2)
plt.plot(x,y,alpha=0.7,color='red', linewidth =0.7, linestyle='--', label='')
plt.plot(np.arange(0.0, 1.0, 0.01), np.arange(0.0, 1.0, 0.01), linewidth =0.4, color = 'black',label='x(n+1)=x(n)')
plt.plot(np.arange(0.0, 1.0, 0.01), [f(xn, r) for xn in np.arange(0.0, 1.0, 0.01)], linewidth =1, color = 'black', label='x(n+1)')
plt.title('LAMEREY DIAGRAM')
plt.xlabel('x(n)')
plt.ylabel('x(n+1)')
plt.show()
# BIFURCATION DIAGRAM & LYAPUNOV EXPONENT
RUN = True
if RUN:
x0 = 0.1
X = []
L = []
for r in np.arange(0.8, 4.00, 0.001):
x = x0
ln = math.log(abs(df(x0, r)))
xn = []
for i in range(1000):
x = f(x, r)
xn.append(x)
ln += math.log(abs(df(x, r)))
X.append(xn[-200:])
L.append(ln / 1000)
X = np.array(X)
plt.figure(figsize=(10, 8))
plt.subplot(2, 1, 1)
for i in range(X.shape[1]):
plt.scatter(np.arange(0.8, 4.00, 0.001), X[:,i], s=0.1, c='black')
plt.title('BIFURCATION DIAGRAM')
plt.xlabel('r')
plt.ylabel('x*')
plt.subplot(2, 1, 2)
plt.plot(np.arange(0.8, 4.00, 0.001), L, color='black')
plt.plot(np.arange(0.8, 4.00, 0.001), [0]* 3200, color='red')
plt.title('LYAPUNOV EXPONENT')
plt.xlabel('r')
plt.ylabel('L')
plt.show()
|
[
"[email protected]"
] | |
664dbeace03e2e09329cf482497d526407c70473
|
05a49b7d914ff4dea0d31d14801a45b571d164f9
|
/Exercism Exercises/leap.py
|
a6093d71f37f3e355112768b6a6529bf68b5c881
|
[] |
no_license
|
Kalpesh-Makwana/Python
|
68d80eb0ad2987188e3142c5a6be3b360bc284fb
|
5bf00776a060d6ecb7cc92644da093ac38a6b19f
|
refs/heads/master
| 2020-12-02T14:16:54.573079 | 2020-04-29T13:11:50 | 2020-04-29T13:11:50 | 231,034,090 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 118 |
py
|
def leap_year(year):
if year % 4 == 0 and year % 100 != 0 or year % 400==0:
return True
return False
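# Examples: leap_year(2000) -> True, leap_year(1900) -> False,
# leap_year(2024) -> True.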
|
[
"[email protected]"
] | |
ab1103ef8f833e5068f656a077eaf3f8f99bcfb7
|
dedb4e01ed0c640612eae17f84bf7ed3da8bce17
|
/backend/photosets/api/serializers.py
|
b1472a3ae238638526e4b9580e0fa04562793c1d
|
[] |
no_license
|
yakimka/chubbyfolio
|
6f26f769fcdfe4714200966188b3f182859ff6fd
|
8c8d90a10eb794154d2b283e0795ce327bb78112
|
refs/heads/master
| 2022-07-24T16:20:55.959859 | 2022-05-04T19:01:51 | 2022-05-04T19:01:51 | 174,234,542 | 0 | 0 | null | 2022-05-04T00:31:03 | 2019-03-06T23:04:40 |
Python
|
UTF-8
|
Python
| false | false | 805 |
py
|
from rest_framework import serializers
from photosets.models import Photoset, Photo
class PhotoSerializer(serializers.ModelSerializer):
thumbnail = serializers.ImageField()
class Meta:
model = Photo
fields = ('id', 'is_cover', 'image', 'thumbnail',
'date_created', 'date_updated')
class PhotosetSerializer(serializers.ModelSerializer):
cover = serializers.ImageField()
preview = serializers.ImageField(source='preview_thumbnail')
class Meta:
model = Photoset
fields = ('id', 'name', 'description', 'cover', 'preview', 'show_on_mainpage',
'published', 'date_created', 'date_updated')
class FilterPhotosetsSerializer(serializers.Serializer):
show_on_mainpage = serializers.NullBooleanField(required=False)
|
[
"[email protected]"
] | |
73b575925f7a11a6fcf9163b2beb965b63a55b09
|
a74b58651b4cc7027fce6856d46bb7325a8a949b
|
/BuildAndroidApplication/src/demo/buildproject/copy_file.py
|
45e7543d3cad8b2b54a2d475a1d8ca65c3f9053a
|
[] |
no_license
|
doncc/worktools
|
b98890bbba149e29962e077e47b2834bd6c46b28
|
d9b52d987c1dda10fda5c033e7225b15d3121b87
|
refs/heads/master
| 2023-01-24T00:58:25.472987 | 2020-12-01T07:06:39 | 2020-12-01T07:06:39 | 113,950,647 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,193 |
py
|
# coding=UTF-8
import os
import shutil
import constant
# Compare MD5 checksums to tell whether a file in B differs from its counterpart in A
def get_MD5(file_path):
files_md5 = os.popen('md5 %s' % file_path).read().strip()
file_md5 = files_md5.replace('MD5 (%s) = ' % file_path, '')
return file_md5
# Copy an entire directory tree and its contents to a new directory
def cpDirs(old_path, new_path):
if os.path.exists(os.path.join(new_path , constant.temp_application_name)):
        print('already exists under apps, skipping creation')
return
for files in os.listdir(old_path):
name = os.path.join(old_path, files)
back_name = os.path.join(new_path, files)
if os.path.isfile(name):
if os.path.isfile(back_name):
if get_MD5(name) != get_MD5(back_name):
shutil.copy(name, back_name)
else:
shutil.copy(name, back_name)
else:
if not os.path.isdir(back_name):
os.makedirs(back_name)
cpDirs(name, back_name)
def copy_file(old_path, new_path):
temp_path = new_path[0: new_path.rfind('/')]
if not os.path.exists(temp_path):
os.makedirs(temp_path)
shutil.copy2(old_path, new_path)
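A hypothetical invocation sketch (the paths below are illustrative, and constant.temp_application_name comes from the project's own constant module):
if __name__ == '__main__':
    # Mirror a source tree, copying only files whose MD5 differs
    cpDirs('/path/to/build/output', '/path/to/apps/backup')
    # Copy a single file, creating the destination directory if needed
    copy_file('/path/to/build/app.apk', '/path/to/apps/backup/app.apk')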
|
[
"[email protected]"
] | |
887c91e48e6d269539425d8db02eb00ba0c9ec36
|
fe6ed003243e2f57fc4cbe82fe71492000718db9
|
/bcbio/chipseq/peaks.py
|
86f6fdafa93bb2a0794ae7b64e3b5b874b24df6f
|
[
"MIT"
] |
permissive
|
gifford-lab/bcbio-nextgen
|
e6f1af9e1bc6afacd2ce4c785af782508732c920
|
52156f1aa6167b870e4387d1976efbd104d2c30c
|
refs/heads/master
| 2021-01-17T22:11:57.474808 | 2016-01-22T10:13:23 | 2016-01-22T10:15:08 | 45,060,650 | 0 | 1 | null | 2015-10-27T18:14:21 | 2015-10-27T18:14:21 | null |
UTF-8
|
Python
| false | false | 2,840 |
py
|
"""High level parallel SNP and indel calling using multiple variant callers.
"""
import os
import copy
from bcbio.log import logger
from bcbio import bam, utils
from bcbio.pipeline import datadict as dd
from bcbio.chipseq import macs2
# from bcbio.pipeline import region
def get_callers():
from bcbio.chipseq import macs2
return {"macs2": macs2.run}
def peakcall_prepare(data, run_parallel):
"""Entry point for doing peak calling"""
caller_fns = get_callers()
to_process = []
for sample in data:
mimic = copy.copy(sample[0])
for caller in dd.get_peakcaller(sample[0]):
if caller in caller_fns and dd.get_phenotype(mimic) == "chip":
mimic["peak_fn"] = caller
name = dd.get_sample_name(mimic)
mimic = _get_paired_samples(mimic, data)
if mimic:
to_process.append(mimic)
else:
logger.info("Skipping peak calling. No input sample for %s" % name)
if to_process:
after_process = run_parallel("peakcalling", to_process)
data = _sync(data, after_process)
return data
def calling(data):
"""Main function to parallelize peak calling."""
chip_bam = dd.get_work_bam(data)
input_bam = data["work_bam_input"]
caller_fn = get_callers()[data["peak_fn"]]
name = dd.get_sample_name(data)
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), data["peak_fn"], name ))
out_file = caller_fn(name, chip_bam, input_bam, dd.get_genome_build(data), out_dir, data["config"])
data["peaks_file"] = out_file
return [[data]]
def _sync(original, processed):
"""
    Add output to data if run successfully.
For now only macs2 is available, so no need
to consider multiple callers.
"""
for original_sample in original:
original_sample[0]["peaks_file"] = []
        for processed_sample in processed:
            if dd.get_sample_name(original_sample[0]) == dd.get_sample_name(processed_sample[0]):
                if utils.file_exists(processed_sample[0]["peaks_file"]):
                    original_sample[0]["peaks_file"].append(processed_sample[0]["peaks_file"])
return original
def _get_paired_samples(sample, data):
"""Get input sample for each chip bam file."""
dd.get_phenotype(sample)
for origin in data:
if dd.get_batch(sample) in dd.get_batch(origin[0]) and dd.get_phenotype(origin[0]) == "input":
sample["work_bam_input"] = dd.get_work_bam(origin[0])
return [sample]
def _get_multiplier(samples):
"""Get multiplier to get jobs
only for samples that have input
"""
to_process = 1
for sample in samples:
if dd.get_phenotype(sample[0]) == "chip":
to_process += 1
return to_process / len(samples)
|
[
"[email protected]"
] | |
fd1276b24ae67a7c8adc75699c9802f83e24adba
|
e00ecd6c7a0f7e18aaac99b14df7f9c2d72ca421
|
/roll_pic.py
|
61c1749e9c0c343d6aeb49265c8f1f043ceadd31
|
[] |
no_license
|
10L2002/githubtest
|
2981cf0e9d37c3ba30766bfcd5744110648d57cf
|
e534b919e52fb62391d6a80b018281ff6393ad37
|
refs/heads/master
| 2020-03-18T16:38:21.357583 | 2018-06-02T20:32:26 | 2018-06-02T20:32:26 | 134,977,521 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,442 |
py
|
def roll_pic(src):  # rotate the sholip logo by a random angle
import cv2
import numpy as np
import random
import os
if not os.path.exists("roll_data"): #name of saving directory
os.mkdir("roll_data")
    # Read the source image size
h, w = src.shape[:2]
size = (w, h)
    # Choose a random rotation angle
x=(random.randint(0, 360))
#print(x)
angle = x
angle_rad = angle/180.0*np.pi
    # Compute the image size after rotation
w_rot = int(np.round(h*np.absolute(np.sin(angle_rad))+w*np.absolute(np.cos(angle_rad))))
h_rot = int(np.round(h*np.absolute(np.cos(angle_rad))+w*np.absolute(np.sin(angle_rad))))
size_rot = (w_rot, h_rot)
    # Rotate about the center of the original image
center = (w/2, h/2)
scale = 1.0
rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)
    # Add a translation so the whole rotated image stays in frame (rotation + translation)
affine_matrix = rotation_matrix.copy()
affine_matrix[0][2] = affine_matrix[0][2] -w/2 + w_rot/2
affine_matrix[1][2] = affine_matrix[1][2] -h/2 + h_rot/2
img_rot = cv2.warpAffine(src, affine_matrix, size_rot, flags=cv2.INTER_CUBIC)
cv2.imwrite("roll_data/" +"img_roll.jpeg" ,img_rot)
#import cv2
#import numpy as np
#import random
#import os
#src_img = cv2.imread("./pra/sholip.png")
#roll_pic(src_img)
|
[
"[email protected]"
] | |
018bc11e66b4722cfd30143ace9c0cb24c48ad82
|
45bf1df34f93b3148c62b01afd8d20db889a2148
|
/TFIDF.py
|
5d6c495e97e243f0d2b993b82de1f63c36aa9f9b
|
[] |
no_license
|
Suhail98/NLP-machine-learning-classifier-for-reviews
|
72ceda4cfae879482aae80b13b6adad140131a8b
|
7ee8fd6d82e76385433c14576962f9daa2cfe8d4
|
refs/heads/main
| 2023-07-12T19:42:44.724764 | 2021-08-23T13:35:23 | 2021-08-23T13:35:23 | 399,116,135 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,871 |
py
|
import os
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from numpy import array
from sklearn.linear_model import LogisticRegression
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
import re
# Read the data files and shuffle the rows
def read_data():
arr = os.listdir("review_polarity\\txt_sentoken\\neg")
data = []
for i in arr:
f = open("review_polarity\\txt_sentoken\\neg\\" + i, encoding='utf-8')
data += [f.read(),0]
f.close()
arr = os.listdir("review_polarity\\txt_sentoken\\pos")
for i in arr:
f = open("review_polarity\\txt_sentoken\\pos\\" + i, encoding='utf-8')
data += [f.read(),1]
f.close()
data = np.array(data).reshape(2000,2)
np.random.shuffle(data)
return data
#
def split_data(data,n):
data_train,lable_train = data[:n,0], data[:n,1]
data_test,lable_test = data[n:,0], data[n:,1]
return data_train,lable_train,data_test,lable_test
data = read_data()
nTrain = int(2000*.8)
data_train,label_train,data_test,label_test = split_data(data,nTrain)
vectorizer = TfidfVectorizer(stop_words='english')
dfidf_train = vectorizer.fit_transform(data_train)
df_idf=[]
for i in dfidf_train:
df_idf.append(array(i.todense()).flatten().tolist())
clf = LogisticRegression(random_state=0,C=10).fit(df_idf, label_train)
dfidf_test = vectorizer.transform(data_test)
result = clf.predict(dfidf_test.todense())
count = 0
for i in range(len(result)):
if result[i] == label_test[i]:
count += 1
print("test accuracy = ",count / (2000-nTrain))
input_review = input("Enter your review: ")
dfidf_test = vectorizer.transform([input_review])
result = clf.predict(dfidf_test)
if result[0] == '0':
print("negative")
else:
print("positive")
|
[
"[email protected]"
] | |
9a8248760bfd5e16d1a36428beb0e18834029e85
|
3c7913e65c375b3661060f22b4daae83ff1a13b2
|
/e.py
|
28cd7d5ee70775f7970dd4f70bcaf7d25a20e189
|
[] |
no_license
|
Deepesh-Kumar/start-python
|
8107e71efe8e2233119632ea4019f75e75148830
|
608f6f14a861fbc962dee07dd8988391890abbff
|
refs/heads/master
| 2021-08-06T02:13:26.754973 | 2019-01-07T03:52:19 | 2019-01-07T03:52:19 | 143,205,114 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 452 |
py
|
#def m(list):
# b = 1
# for i in a:
# b = b * i
# print b
#a = [1,2,3,4,5]
#m(a)
ol = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
a ='fun thmes!'
op ='aeiou'
k = []
for i in a:
    if i.lower() in ol:
        i = ol[(ol.index(i.lower()) + 1) % len(ol)]  # shift to the next letter, wrapping z -> a
    print i
#for i in a:
# if i not in b:
# b.append(i)
#print b
#def checkduplicate(a):
# for i in a:
# if i not in b:
# b.append(i)
# print b
#checkduplicate(d)
|
[
"[email protected]"
] | |
376af8d3129802fbb8400585f4e33af5edcb5c18
|
e22051c23ede600098376508f43121c9fa35d88d
|
/rllib_inference/src/battlesnake_heuristics.py
|
88b21f2a0d27dc0d806d41622287b692028356ba
|
[] |
no_license
|
iamwillzhu/LocalEnv
|
7265979fe3103b75f459277c5704a5a1f2d2e3df
|
4f30d1a3ab0216b12a3a6caca5c134c689affe9d
|
refs/heads/master
| 2022-11-18T15:28:55.486601 | 2020-07-19T21:50:21 | 2020-07-19T21:50:21 | 280,525,426 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,484 |
py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import numpy as np
import random
from .a_star_search import a_star
from .snake_state import State
class MyBattlesnakeHeuristics:
'''
The BattlesnakeHeuristics class allows you to define handcrafted rules of the snake.
'''
FOOD_INDEX = 0
def __init__(self):
pass
def tail_chase(self, json):
your_snake_body = json["you"]["body"]
i, j = your_snake_body[0]["y"], your_snake_body[0]["x"]
if len(your_snake_body) < 3:
return None,None
path = a_star(initial_state=State(
body=your_snake_body,
board_height=json["board"]["height"],
board_width=json["board"]["width"]))
next_move = path[1].body[0]
tail_direction = None
if next_move["y"] == i - 1:
tail_direction = 0
if next_move["y"] == i + 1:
tail_direction = 1
if next_move["x"] == j - 1:
tail_direction = 2
if next_move["x"] == j + 1:
tail_direction = 3
return next_move, tail_direction
def go_to_food_if_close(self, state, json):
'''
Example heuristic to move towards food if it's close to you.
'''
# Get the position of the snake head
your_snake_body = json["you"]["body"]
i, j = your_snake_body[0]["y"], your_snake_body[0]["x"]
# Set food_direction towards food
food = state[:, :, self.FOOD_INDEX]
# Note that there is a -1 border around state so i = i + 1, j = j + 1
if -1 in state:
i, j = i+1, j+1
food_direction = None
if food[i-1, j] == 1:
food_direction = 0 # up
if food[i+1, j] == 1:
food_direction = 1 # down
if food[i, j-1] == 1:
food_direction = 2 # left
if food[i, j+1] == 1:
food_direction = 3 # right
return food_direction
def run(self, state, snake_id, turn_count, health, json, action):
'''
The main function of the heuristics.
Parameters:
-----------
`state`: np.array of size (map_size[0]+2, map_size[1]+2, 1+number_of_snakes)
Provides the current observation of the gym.
Your target snake is state[:, :, snake_id+1]
`snake_id`: int
            Indicates the id where id is in [0...number_of_snakes]
`turn_count`: int
Indicates the number of elapsed turns
`health`: dict
Indicates the health of all snakes in the form of {int: snake_id: int:health}
`json`: dict
Provides the same information as above, in the same format as the battlesnake engine.
`action`: np.array of size 4
The qvalues of the actions calculated. The 4 values correspond to [up, down, left, right]
'''
log_string = ""
# The default `best_action` to take is the one that provides has the largest Q value.
# If you think of something else, you can edit how `best_action` is calculated
best_action = int(np.argmax(action))
if health[snake_id] > 70:
next_move,tail_direction = self.tail_chase(json)
if next_move is not None:
best_action = tail_direction if tail_direction is not None else best_action
log_string = f"{next_move}, {tail_direction}"
# Example heuristics to eat food that you are close to.
if health[snake_id] < 30:
food_direction = self.go_to_food_if_close(state, json)
            if food_direction is not None:
best_action = food_direction
log_string = "Went to food if close."
# TO DO, add your own heuristics
assert best_action in [0, 1, 2, 3], "{} is not a valid action.".format(best_action)
return best_action, log_string
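A minimal, hypothetical invocation: with health in (30, 70], neither heuristic fires and the Q-value argmax is returned unchanged. All inputs below are fabricated stand-ins, and the relative imports above mean this module is normally imported rather than run directly:
if __name__ == '__main__':
    h = MyBattlesnakeHeuristics()
    state = np.zeros((13, 13, 2))  # e.g. an 11x11 board plus its border
    payload = {"you": {"body": [{"x": 5, "y": 5}]},
               "board": {"height": 11, "width": 11}}
    action, log = h.run(state, snake_id=0, turn_count=0, health={0: 50},
                        json=payload, action=np.array([0.1, 0.7, 0.1, 0.1]))
    print(action)  # 1 (down), the argmax of the fabricated Q-values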
|
[
"[email protected]"
] | |
2d2abfb70b48554ebe8771630db92de5424a3f98
|
b7c18dd657d18d5381ceafaa5fe5089308d2d00f
|
/test/test_data_lake.py
|
d762f11217517f2913b8fb4a7c49fae920c16c63
|
[
"Apache-2.0"
] |
permissive
|
Bezbran/mongo-python-driver
|
06b00b0c89441ed1a290ea0b174a1f4d653b8cd2
|
5332adfe2bb28f8703b0e16f80c0eb351d6201c0
|
refs/heads/master
| 2023-04-04T07:09:40.274116 | 2021-04-19T12:05:15 | 2021-04-19T12:05:15 | 278,339,341 | 0 | 0 |
Apache-2.0
| 2021-04-14T07:31:24 | 2020-07-09T10:55:05 |
Python
|
UTF-8
|
Python
| false | false | 1,672 |
py
|
# Copyright 2020-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Atlas Data Lake."""
import os
import sys
sys.path[0:0] = [""]
from test import client_context, unittest
from test.crud_v2_format import TestCrudV2
from test.utils import TestCreator
# Location of JSON test specifications.
_TEST_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "data_lake")
class DataLakeTestSpec(TestCrudV2):
# Default test database and collection names.
TEST_DB = 'test'
TEST_COLLECTION = 'driverdata'
@classmethod
@unittest.skipUnless(client_context.is_data_lake,
'Not connected to Atlas Data Lake')
def setUpClass(cls):
super(DataLakeTestSpec, cls).setUpClass()
def setup_scenario(self, scenario_def):
# Spec tests MUST NOT insert data/drop collection for
# data lake testing.
pass
def create_test(scenario_def, test, name):
def run_scenario(self):
self.run_scenario(scenario_def, test)
return run_scenario
TestCreator(create_test, DataLakeTestSpec, _TEST_PATH).create_tests()
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
1c2460f221cff22e2a2f9a27bdfa673a7bcf0bd1
|
38da215b9b50d0743f34cd0ef38e75db557deeff
|
/swagger_client/models/time_stream.py
|
5f68f77e639429a8fcb857cf578972f5eaba0672
|
[
"MIT"
] |
permissive
|
HalestormAI/stravaio
|
0742dc5749d90840ef6c4638ca4c3ee3040d57ce
|
9d99179eb70bf5219ab4d2c7d7b5d3617457ae9e
|
refs/heads/master
| 2020-09-07T14:57:59.836372 | 2020-02-29T00:03:57 | 2020-02-29T00:03:57 | 220,817,328 | 0 | 0 |
MIT
| 2020-02-29T00:03:47 | 2019-11-10T16:29:45 | null |
UTF-8
|
Python
| false | false | 6,366 |
py
|
# coding: utf-8
"""
Strava API v3
Strava API # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.base_stream import BaseStream # noqa: F401,E501
class TimeStream(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'original_size': 'int',
'resolution': 'str',
'series_type': 'str',
'data': 'list[int]'
}
attribute_map = {
'original_size': 'original_size',
'resolution': 'resolution',
'series_type': 'series_type',
'data': 'data'
}
def __init__(self, original_size=None, resolution=None, series_type=None, data=None): # noqa: E501
"""TimeStream - a model defined in Swagger""" # noqa: E501
self._original_size = None
self._resolution = None
self._series_type = None
self._data = None
self.discriminator = None
if original_size is not None:
self.original_size = original_size
if resolution is not None:
self.resolution = resolution
if series_type is not None:
self.series_type = series_type
if data is not None:
self.data = data
@property
def original_size(self):
"""Gets the original_size of this TimeStream. # noqa: E501
The number of data points in this stream # noqa: E501
:return: The original_size of this TimeStream. # noqa: E501
:rtype: int
"""
return self._original_size
@original_size.setter
def original_size(self, original_size):
"""Sets the original_size of this TimeStream.
The number of data points in this stream # noqa: E501
:param original_size: The original_size of this TimeStream. # noqa: E501
:type: int
"""
self._original_size = original_size
@property
def resolution(self):
"""Gets the resolution of this TimeStream. # noqa: E501
The level of detail (sampling) in which this stream was returned # noqa: E501
:return: The resolution of this TimeStream. # noqa: E501
:rtype: str
"""
return self._resolution
@resolution.setter
def resolution(self, resolution):
"""Sets the resolution of this TimeStream.
The level of detail (sampling) in which this stream was returned # noqa: E501
:param resolution: The resolution of this TimeStream. # noqa: E501
:type: str
"""
allowed_values = ["low", "medium", "high"] # noqa: E501
if resolution not in allowed_values:
raise ValueError(
"Invalid value for `resolution` ({0}), must be one of {1}" # noqa: E501
.format(resolution, allowed_values)
)
self._resolution = resolution
@property
def series_type(self):
"""Gets the series_type of this TimeStream. # noqa: E501
The base series used in the case the stream was downsampled # noqa: E501
:return: The series_type of this TimeStream. # noqa: E501
:rtype: str
"""
return self._series_type
@series_type.setter
def series_type(self, series_type):
"""Sets the series_type of this TimeStream.
The base series used in the case the stream was downsampled # noqa: E501
:param series_type: The series_type of this TimeStream. # noqa: E501
:type: str
"""
allowed_values = ["distance", "time"] # noqa: E501
if series_type not in allowed_values:
raise ValueError(
"Invalid value for `series_type` ({0}), must be one of {1}" # noqa: E501
.format(series_type, allowed_values)
)
self._series_type = series_type
@property
def data(self):
"""Gets the data of this TimeStream. # noqa: E501
The sequence of time values for this stream, in seconds # noqa: E501
:return: The data of this TimeStream. # noqa: E501
:rtype: list[int]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this TimeStream.
The sequence of time values for this stream, in seconds # noqa: E501
:param data: The data of this TimeStream. # noqa: E501
:type: list[int]
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TimeStream, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TimeStream):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
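A small usage sketch of the generated model (values are illustrative):
if __name__ == '__main__':
    ts = TimeStream(original_size=3, resolution='high', series_type='time', data=[0, 1, 2])
    print(ts.to_dict())  # {'original_size': 3, 'resolution': 'high', 'series_type': 'time', 'data': [0, 1, 2]}
    # Out-of-range enum values are rejected by the setters:
    # ts.resolution = 'ultra'  -> raises ValueError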
|
[
"[email protected]"
] | |
fdd0bf61a4c5d7f462b8bf135845df01e7f7da05
|
946a98b6795e8245087543975005e6865b538be3
|
/warikan2.py
|
a69c185f4a9fbfb614ceb3d16c1958a77435048a
|
[] |
no_license
|
kazu-taka/hello-function
|
7c415526e12a17d85987709996a69d364bbe9e62
|
658113b596bd29bf7e360c005f29b192600627e4
|
refs/heads/master
| 2020-04-07T04:07:39.761775 | 2018-11-18T01:18:10 | 2018-11-18T01:18:10 | 158,040,762 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 334 |
py
|
def warikan(amount, number_of_people):
return f"1人あたり: {amount // number_of_people}円, 端数: {amount % number_of_people}円"
print(warikan(amount=1500, number_of_people=3))
print(warikan(amount=2000, number_of_people=3))
print(warikan(amount=3000, number_of_people=4))
print(warikan(amount=5000, number_of_people=8))
|
[
"[email protected]"
] | |
521ea38335f0c6bebf7ef64a8d68203c32de69dc
|
f97cabce5c91238678e159387f03636d4deb90fb
|
/dajare/crawler_kaishaseikatsu_jp.py
|
c0bb1bb7c7b5cf459ec22cf9603ddf779b6d4b93
|
[] |
no_license
|
vaaaaanquish/dajare-python
|
1daa8b4d31a9e3d5e1336d3b31693c1d491ed814
|
150132cef0333a94c9e286c4241af92c630cd7bd
|
refs/heads/master
| 2022-12-10T08:46:42.827279 | 2020-12-11T03:28:54 | 2020-12-11T03:28:54 | 242,304,312 | 16 | 3 | null | 2022-12-08T03:43:06 | 2020-02-22T08:09:23 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,010 |
py
|
from tqdm import tqdm
from dajare.crawler import Crawler
class CrawlerKaishaseikatsuJp(Crawler):
def run(self):
output_list = self._run()
self.output(output_list, 'dajare_kaishaseikatsu_jp.json')
def _run(self):
output_list = []
for i in tqdm(range(0, 2200, 100)):
url = f'http://archives.kaishaseikatsu.jp/cgi-bin/kaisha2/board_r.cgi?type=kaisha_dajare&next={i}&range=100'
bs = self.get_bs(url, encoding='shift-jis')
for x in bs.find_all('tr', bgcolor="#FBFFB2"):
output_list.append({
'text': x.find('td').text,
'url': url,
'author': 'kaishaseikatsu',
'author_link': 'http://archives.kaishaseikatsu.jp',
'mean_score': 0.,
'deviation_score': 0.,
'category': [],
'tag': [],
'eval_list': []
})
return output_list
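A hypothetical entry point, assuming the base Crawler class supplies the get_bs() and output() helpers used above:
if __name__ == '__main__':
    CrawlerKaishaseikatsuJp().run()  # writes dajare_kaishaseikatsu_jp.json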
|
[
"[email protected]"
] | |
fed85186a1b405470991007647327d9d0cea5825
|
cde752af8df0ae3007575778ccb0d43c4d5546aa
|
/checkout/migrations/0003_auto_20200812_0925.py
|
cbefb02e0d09f27098c40968bdc32939b9fb1a91
|
[] |
no_license
|
Code-Institute-Submissions/django-eshop-project
|
e9f401fca16b4c56f07a66f01accea09999f208e
|
a6988c80077ca45c62e1753e420616bbc6dc4275
|
refs/heads/master
| 2022-12-03T19:41:59.428298 | 2020-08-15T14:13:25 | 2020-08-15T14:13:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
# Generated by Django 3.0.8 on 2020-08-12 09:25
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('checkout', '0002_auto_20200808_1341'),
]
operations = [
migrations.AlterField(
model_name='order',
name='country',
field=django_countries.fields.CountryField(max_length=2),
),
]
|
[
"[email protected]"
] | |
06dcf6622fdc3c640bb85b1c01cb32e8d2e61985
|
dc95aa9f813bf0fea3695f0e479dd892b0a9280b
|
/image2coord.py
|
452df619ec12e44894fb48e05d8aa28a6e933d00
|
[] |
no_license
|
martibsk/imageROV
|
8f6eea3fa5c5364e60985371f7806e57cbbb7fb8
|
41f623f29571e110a19c58aa173293050cde3a5b
|
refs/heads/master
| 2022-08-14T04:40:29.440244 | 2019-06-06T09:11:10 | 2019-06-06T09:11:10 | 190,339,294 | 0 | 0 | null | 2022-06-21T22:04:52 | 2019-06-05T06:36:30 |
Python
|
UTF-8
|
Python
| false | false | 1,610 |
py
|
import detector
import cv2
from imutils.video import FPS
import numpy as np
import darknet
import os
import detectorCPU
if __name__ == '__main__':
model = 'sylinder'
input_frame = 'sylinder.mp4'
useGPU = True
if useGPU:
netMain, metaMain = detector.init_yolo(model)
vs = detector.video2image(input_frame)
else:
vs = cv2.VideoCapture(input_frame)
# Derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join(["models", model, "yolov3.weights"])
configPath = os.path.sep.join(["models", model, "yolov3.cfg"])
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
# Determine only the 'output' layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
fps = FPS().start()
while True:
ret, frame_read = vs.read()
# If frame not grabbed, break out of loop
if not ret:
break
if useGPU:
detections = detector.YOLO(frame_read, netMain, metaMain)
else:
detections = detectorCPU.detect(frame_read, net, ln)
key = cv2.waitKey(1)
if key == ord('q'):
break
print(detections)
#detector.printInfo(detections)
# Update the FPS counter
fps.update()
# Stop the timer and display FPS information
fps.stop()
print("\n[INFO] elapsed time: {:2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    vs.release()
|
[
"[email protected]"
] | |
96f1f685114c793b03f5d00db5ccf5b4d271a8ad
|
b47f5ca0a51cf59427b7bd12e9c85064a1e13e03
|
/tests/easyci/test_user_config.py
|
1c8736e3d3bb8b17d77ff5f9e1a29fd3a58cbbd4
|
[
"MIT"
] |
permissive
|
naphatkrit/easyci
|
a490b57e601bcad6d2022834809dd60cb0902e0c
|
7aee8d7694fe4e2da42ce35b0f700bc840c8b95f
|
refs/heads/master
| 2016-09-02T01:14:28.505230 | 2015-09-09T00:26:25 | 2015-09-09T00:26:25 | 41,396,486 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,953 |
py
|
import mock
import os
import pytest
import shutil
import tempfile
import yaml
from easyci.vcs.base import Vcs
from easyci.user_config import (
_config_types, _default_config, load_user_config,
ConfigFormatError, ConfigNotFoundError
)
@pytest.yield_fixture(scope='function')
def repo_path():
path = tempfile.mkdtemp()
try:
yield path
finally:
shutil.rmtree(path)
def _create_config_file(config, path):
with open(os.path.join(path, 'eci.yaml'), 'w') as f:
f.write(yaml.safe_dump(config))
@pytest.fixture(scope='function')
def fake_vcs(repo_path):
vcs = mock.Mock(spec=Vcs)
vcs.path = repo_path
return vcs
def test_default_config_types():
for k, v in _config_types.iteritems():
if k in _default_config:
assert isinstance(_default_config[k], v)
@pytest.mark.parametrize('tests', [
['true'],
[],
['true', 'true'],
])
def test_load_user_config_simple(tests, fake_vcs, repo_path):
_create_config_file({
"tests": tests
}, repo_path)
config = load_user_config(fake_vcs)
assert config['tests'] == tests
@pytest.mark.parametrize('user_config', [
{},
{"other": 0},
])
def test_load_user_config_default_config(user_config, fake_vcs, repo_path):
_create_config_file(user_config, repo_path)
config = load_user_config(fake_vcs)
user_config.update(_default_config)
assert config == user_config
@pytest.mark.parametrize('config_string', [
yaml.safe_dump({}) + '}}',
yaml.safe_dump({'tests': True}),
yaml.safe_dump([]),
])
def test_load_user_config_invalid_config(config_string, fake_vcs, repo_path):
with open(os.path.join(repo_path, 'eci.yaml'), 'w') as f:
f.write(config_string)
with pytest.raises(ConfigFormatError):
load_user_config(fake_vcs)
def test_load_user_config_not_found(fake_vcs):
with pytest.raises(ConfigNotFoundError):
load_user_config(fake_vcs)
|
[
"[email protected]"
] | |
e9d1caab6dde00c07ce3832efe253d9348ac4a88
|
940dcf18bb1db19610e29902c78ec703690c4297
|
/pygame/py002.py
|
17a13a71d3e9bdeacc203460516516e052a3e799
|
[] |
no_license
|
Sahil4UI/PythonRegular11-12Dec2020
|
dc20e8d13d191801301d18d5b92f5775fe9c0674
|
0b22b1d8c703ac21a1f02c2b10f327bcb2e96460
|
refs/heads/main
| 2023-02-27T13:00:22.415199 | 2021-01-31T06:57:58 | 2021-01-31T06:57:58 | 318,424,644 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,076 |
py
|
import random
import pygame
import time
from pygame.locals import *
pygame.init()
H= 600
W=800
gameScreen= pygame.display.set_mode((W,H))
color= (255,255,255)
red = (255 , 0 , 0 )
blue = (0,0,255)
w=30
h=30
pygame.time.set_timer(USEREVENT,1000)
frog=pygame.image.load("frog.png")#raw string-path
frog = pygame.transform.scale(frog,(50,50))
audio = pygame.mixer.Sound("point.wav")
def Score(counter):
font=pygame.font.SysFont(None,30)
#anti aliasing ->texture-> True
text=font.render(f"Score : {counter}",True,blue)
gameScreen.blit(text,(10,10))
def Snake(snakeList):
for i in snakeList:
pygame.draw.rect(gameScreen,red,[i[0],i[1],w,h])
def Timer(sec):
font=pygame.font.SysFont(None,30)
#anti aliasing ->texture-> True
text=font.render(f"Time Left : {sec} seconds",True,blue)
gameScreen.blit(text,(500,10))
def gameOver():
pass
# font=pygame.font.SysFont(None,30)
# #anti aliasing ->texture-> True
# text=font.render(f"***GAME OVER***",True,blue)
# gameScreen.blit(text,(500,10))
def main():
movex = 0
movey = 0
frogX = random.randint(0,W-50)
frogY = random.randint(0,H-50)
x=0
y=0
sec=20
counter=0
snakeList= []
snakeLength=1
while True:
gameScreen.fill(color)
for event in pygame.event.get():
if event.type==pygame.QUIT:
pygame.quit()
quit()
elif event.type==pygame.USEREVENT:
sec-=1
if event.type==pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
movex=-1
movey=0
elif event.key == pygame.K_RIGHT:
movex=1
movey=0
elif event.key==pygame.K_UP:
movey=-1
movex=0
elif event.key==pygame.K_DOWN:
movey=1
movex=0
# gameScreen.blit(image,(imageX,imageY))
snake = pygame.draw.rect(gameScreen,red,[x,y,w,h])
snakeList.append([x,y])
Snake(snakeList)
frogRect = pygame.Rect([frogX,frogY,50,50])
gameScreen.blit(frog,(frogX,frogY))
x += movex
y += movey
if x>W-w:
movex=-1
elif x<0:
movex=1
if y>H-h:
movey=-1
elif y<0:
movey=1
Score(counter)
Timer(sec)
if sec <0:
gameOver()
if snakeLength<len(snakeList):
del snakeList[0]
if snake.colliderect(frogRect):
frogX = random.randint(0,W-50)
frogY = random.randint(0,H-50)
counter+=1
audio.play()
snakeLength+=20
pygame.display.update()
main()
|
[
"[email protected]"
] | |
c693f5db8f614f95e3a1c00a525aaebceea90a87
|
a2277623dee26a0cb76f71092f8a88b363618962
|
/list_servers.py
|
19fac5a3c8ba0541e315b6ae21eed50406969c2f
|
[] |
no_license
|
rangsutu88/Azure-Proxy-Gen
|
474e560cacd5827a7d0e8e35cd69648873336c96
|
722b7168effea4a14aceae060c79ccf4b1f968ad
|
refs/heads/master
| 2022-11-28T09:17:39.180679 | 2020-08-06T03:10:18 | 2020-08-06T03:10:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 86 |
py
|
from Azure import display_servers2
if __name__ == '__main__':
display_servers2()
|
[
"[email protected]"
] | |
879086db133bd1ab22783e38d697afc115869d4f
|
71c4a775c81179e920b72bdee87d9af3edfd4d99
|
/01_Sintaxe_Basica/10_dicionario.py
|
9ea3b3c107c7a83db1b023da9899d434b0a3d0f8
|
[] |
no_license
|
frclasso/acate18122018
|
16f4169dbfb0eb8c25e253965642122e6095a211
|
98e4697d4e34c740a537a553b5ae6841159c58f7
|
refs/heads/master
| 2020-04-08T00:54:59.822648 | 2019-01-24T16:55:42 | 2019-01-24T16:55:42 | 158,873,478 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,267 |
py
|
#!/usr/bin/env python3
aluno = {'ID': 1223,
'Nome':'Patricia',
'Idade': 27,
'Curso': 'Sistemas de Informação',
'Turno':'Noturno'
}
print(f"ID: {aluno['ID']}")
print(f"Nome: {aluno['Nome']}")
print(f"Idade:{aluno['Idade']}")
print()
'''Updating existing values'''
aluno['Idade'] = 28
print(aluno)
print()
'''Inserting a new field'''
aluno['Matrícula'] = 8990020198
print(aluno)
print()
# Using the update method
aluno.update({'Turno':'Diurno', 'Sobrenome':'Nunes', 'Telefone':'(48)555-333'})
print(aluno)
print()
'''Deleting items'''
aluno.__delitem__('Idade')
print(aluno)
print()
aluno.pop('Turno')
print(aluno)
print()
del aluno['Matrícula']
print(aluno)
print()
'''Clearing all data'''
# aluno.clear()
# print(aluno) # {}
'''Deleting the dictionary itself'''
# del aluno
# print(aluno) # NameError: name 'aluno' is not defined
'''Creating an empty dictionary'''
meuDic = {}
print(meuDic)
print(type(meuDic))
#
print(f'Dictionary size: {len(aluno)} items.')
'''Printing the dictionary keys - keys()'''
print(aluno.keys())
'''Printing the dictionary values - values()'''
print(aluno.values())
'''Printing all dictionary items'''
print(aluno.items())
|
[
"[email protected]"
] | |
01e192a4c835a3d6ec4c29d6fb66176e51359dcb
|
7c27898a5f85dedf0dbbb12451b6c635861dc197
|
/tornado_overview/chapter03/aiomysql_test.py
|
8c3375ad203593d54c3a67dc4692f73aa301b121
|
[] |
no_license
|
Asunqingwen/Tornado_test_application
|
9323d3289fadf69e7b1e7685da8f631d0e88968f
|
4f3a9cda9fc081a8b83f06934bc480cd597d4ad8
|
refs/heads/master
| 2023-02-18T08:43:58.012236 | 2021-01-21T09:59:57 | 2021-01-21T09:59:57 | 330,935,556 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 662 |
py
|
import aiomysql
from tornado import ioloop
async def go():
pool = await aiomysql.create_pool(host='192.168.10.69', port=3306,
user='root', password='root',
db='message', charset="utf8")
async with pool.acquire() as conn:
async with conn.cursor() as cur:
await cur.execute("SELECT * from message")
value = await cur.fetchone()
print(cur.description)
print(value)
pool.close()
await pool.wait_closed()
if __name__ == '__main__':
io_loop = ioloop.IOLoop.current()
io_loop.run_sync(go)
|
[
"[email protected]"
] | |
429e64977baa323e53d62067c95f88041a1940f3
|
7929367c0d3003cb903c0217b0477abd60e759bc
|
/lexicons.py
|
a80dbb4c8fb12d16bad920d09300117dfbcd7421
|
[] |
no_license
|
daimrod/opinion-sentence-annotator
|
cf5a879c9f24c6f47e7d7278ec730899da0e96fd
|
e487b9a11959876d83316e97c572f0116d982617
|
refs/heads/master
| 2020-06-21T20:41:37.878330 | 2017-03-08T15:01:20 | 2017-03-08T15:01:20 | 74,770,001 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,329 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
import reader
import resources as res
# A global variables used to store known lexicons
_lexicons = {}
def get_lexicon(lexicon_name):
"""Return the lexicon designated by lexicon_name.
Args:
lexicon_name: The name of a lexicon.
Returns:
Returns the requested lexicon
"""
if lexicon_name not in _lexicons:
raise KeyError('The lexicon \'%s\' has not been registered' % lexicon_name)
lexicon_reader, lexicon_path = _lexicons[lexicon_name]
return lexicon_reader(lexicon_path)
def register_lexicon(lexicon_name, lexicon_reader, lexicon_path):
"""Register a lexicon and how to read it.
This function register into a datastructure how to load a lexicon.
Args:
lexicon_name: The name of a lexicon.
lexicon_reader: A function to read the given lexicon.
lexicon_path: The path to read the given lexicon.
Returns:
Nothing"""
_lexicons[lexicon_name] = (lexicon_reader, lexicon_path)
register_lexicon('bing_liu', reader.read_bing_liu, res.bing_liu_lexicon_path)
register_lexicon('mpqa', reader.read_mpqa, res.mpqa_lexicon_path)
register_lexicon('mpqa_plus', reader.read_mpqa_plus, res.mpqa_plus_lexicon_path)
register_lexicon('nrc_emotion', reader.read_nrc_emotion, res.nrc_emotion_lexicon_path)
register_lexicon('nrc_emotions', reader.read_nrc_emotions, res.nrc_emotion_lexicon_path)
register_lexicon('nrc_hashtag_unigram', reader.read_nrc_hashtag_unigram, res.nrc_hashtag_unigram_lexicon_path)
register_lexicon('nrc_hashtag_bigram', reader.read_nrc_hashtag_bigram, res.nrc_hashtag_bigram_lexicon_path)
register_lexicon('nrc_hashtag_pair', reader.read_nrc_hashtag_pair, res.nrc_hashtag_pair_lexicon_path)
register_lexicon('nrc_hashtag_sentimenthashtags', reader.read_nrc_hashtag_sentimenthashtags, res.nrc_hashtag_sentimenthashtags_lexicon_path)
register_lexicon('lidilem_adjectifs', reader.read_lidilem_adjectifs, res.lidilem_adjectifs_lexicon_path)
register_lexicon('lidilem_noms', reader.read_lidilem_noms, res.lidilem_noms_lexicon_path)
register_lexicon('lidilem_verbes', reader.read_lidilem_verbes, res.lidilem_verbes_lexicon_path)
register_lexicon('blogoscopie', reader.read_blogoscopie, res.blogoscopie_lexicon_path)
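A usage sketch of the registry (assumes the lexicon files referenced in resources.py are present locally):
if __name__ == '__main__':
    bing_liu = get_lexicon('bing_liu')  # loaded via reader.read_bing_liu
    try:
        get_lexicon('unknown')          # unregistered names raise KeyError
    except KeyError as exc:
        print(exc)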
|
[
"[email protected]"
] |