Dataset schema (one row per source file; "⌀" marks nullable columns):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
=== delpapa/avalanches/data_analysis.py ===
Repo: delpapa/CritSORN @ cdad55d55f39e04f568ca1bc0c6036bec8db08fb | License: MIT | Size: 4,923 bytes (Python)
hexsha: 559ad11e61e76b073ffa707dfeef7cd524cd64ce | Stars/Issues/Forks: none recorded
########################################################################
# This script contains all the data analysis functions #
########################################################################
from __future__ import division
from pylab import *
import numpy as np  # explicit import; np.floor and np.where are used below
import scipy, scipy.stats
import sys  # needed for the sys.stdout progress output below
import tables
import os
from tempfile import TemporaryFile
def avalanches(activity, variable, value,
Threshold = 'percent', Theta_percent = 15,
Transient = 0, fullS = False, binsize = False):
    # Threshold selects which kind of activity threshold we are using:
    # if 'percent', the Theta_percent percentile of the activity is used
    # if 'half', half the mean activity is used
    # if '1std', the mean activity minus one standard deviation is used
    # otherwise, the value of Threshold itself is used as Theta
a_duration_all = []; a_area_all = []
    # Theta is given by the Theta_percent percentile (15 by default)
    # Theta MUST be an int
if Threshold == 'percent':
Theta = percentile(activity, Theta_percent)
elif Threshold == 'half':
Theta = int(activity.mean()/2.)
elif Threshold == '1std':
Theta = int(activity.mean()-activity.std())
else:
Theta = Threshold
for data_file in range(len(activity)):
        sys.stdout.write('\rcalculating avalanches %d%%'
                         % (100 * (data_file + 1) / len(activity)))
        sys.stdout.flush()
        # make prettier: see the generalized rebin sketch after this function
if binsize is not False and binsize != 1:
avalan_stable = activity[data_file]
if binsize == 2:
avalan_stable = avalan_stable[::2] + avalan_stable[1::2]
avalan_stable /= 2.
if binsize == 5:
avalan_stable = avalan_stable[::5] + avalan_stable[1::5] +\
avalan_stable[2::5] + avalan_stable[3::5] +\
avalan_stable[4::5]
avalan_stable /= 5.
if binsize == 10:
avalan_stable = avalan_stable[::10] + avalan_stable[1::10] +\
avalan_stable[2::10] + avalan_stable[3::10] +\
avalan_stable[4::10] + avalan_stable[5::10] +\
avalan_stable[6::10] + avalan_stable[7::10] +\
avalan_stable[8::10] + avalan_stable[9::10]
avalan_stable /= 10.
avalan_stable = np.floor(avalan_stable - Theta)
else:
# to avoid empty array error
if len(activity.shape) > 1:
avalan_stable = activity[data_file] - Theta
else:
avalan_stable = activity - Theta
size, area = 0, 0
### TODO: make it prettier - this should include only the avalanches
### before the transient
transient_end = 0
new_range = len(avalan_stable)
        if Transient != 0:
for i in range(Transient, new_range):
if avalan_stable[i] == 0:
transient_end = i + 1
break
new_range = transient_end
####
for i in range(new_range):
if avalan_stable[i] > 0:
size += 1
if not fullS:
area += int(avalan_stable[i])
else:
area += int(activity[data_file][i])
elif size != 0:
a_duration_all.append(size)
a_area_all.append(area)
size, area = 0, 0
# convert to np.array cause it is easier for the other functions
a_duration_all = asarray(a_duration_all)
a_area_all = asarray(a_area_all)
    print('...done')
return a_duration_all, a_area_all
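
# The hard-coded binsize == 2/5/10 branches above could be collapsed into a
# single helper. A minimal sketch, assuming NumPy; `rebin` is a hypothetical
# name and not part of the original module:
def rebin(signal, binsize):
    """Average consecutive samples of `signal` in blocks of `binsize`.

    Trailing samples that do not fill a complete bin are discarded, which
    also avoids the shape mismatch the strided sums above would hit when
    the signal length is not a multiple of binsize.
    """
    signal = np.asarray(signal)
    n_bins = len(signal) // binsize
    return signal[:n_bins * binsize].reshape(n_bins, binsize).mean(axis=1)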
### distribution of the total activity
# Return the average activity (as a array), and std
def mean_activity(activity, variable, value):
distribution = zeros((len(activity), activity.max()+1))
for data_file in range(len(activity)):
# print the % in the terminal
        sys.stdout.write('\rcalculating activity %d%%'
                         % (100 * (data_file + 1) / len(activity)))
        sys.stdout.flush()
total_steps = activity[data_file].size
for i in range(total_steps):
distribution[data_file, activity[data_file, i]] += 1
distribution[data_file, :] /= distribution[data_file, :].sum()
dist_mean = distribution.mean(0)
dist_std = distribution.std(0)
    print('...done')
return dist_mean, dist_std
### calculate the size average as a function of the duration
# receives the non-sorted arrays with measures of size and duration
# returns two non-sorted arrays containing the duration and average
# avalanche size.
def area_X_duration(a_dur, a_area):
S_avg = []
T_avg = []
for i in range(len(a_dur)):
duration = a_dur[i]
if duration not in T_avg:
T_avg.append(duration)
S_avg.append(a_area[np.where(a_dur==duration)].mean())
T_avg=asarray(T_avg)
S_avg=asarray(S_avg)
return T_avg, S_avg
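
# Hypothetical end-to-end usage of the functions above, assuming `activity`
# is a (n_files, n_steps) integer array of network activity (note that the
# `variable` and `value` arguments are accepted but not used internally):
#
#     a_dur, a_area = avalanches(activity, 'N', 200)
#     T_avg, S_avg = area_X_duration(a_dur, a_area)
#     loglog(T_avg, S_avg, '.')   # loglog comes from the pylab star-import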
Stats: avg_line_length 33.49 | max_line_length 78 | alphanum_fraction 0.558 | classes 0 (score 0) | generators 0 (0) | decorators 0 (0) | async functions 0 (0) | documentation 1,123 (score 0.228)
=== edwin/__init__.py ===
Repo: AlanSwenson/edwin @ 94f62a4db6cc5123224607f92a1f552be072c708 | License: MIT | Size: 3,404 bytes (Python)
hexsha: 559adf86675fc57065409a6e9ac6154669c807e5 | Stars: 8 (2019-03-13 to 2019-04-02) | Issues/Forks: none recorded
import eventlet
eventlet.monkey_patch()
import time
from datetime import datetime, timedelta, timezone
import pytz
from email.utils import parsedate_tz
import json
from flask import Flask, request, render_template
from threading import Thread
from tweepy import OAuthHandler, API, Stream, Cursor
from flask_socketio import (
SocketIO,
emit,
join_room,
leave_room,
close_room,
rooms,
disconnect,
)
from darksky import forecast
socketio = SocketIO()
thread = None
thread2 = None
from edwin.tweets import StdOutListener
def create_app():
app = Flask(__name__)
app.config.from_object("config")
app.config["SECRET_KEY"] = "secret!"
with app.app_context():
socketio.init_app(app, async_mode="eventlet")
CONSUMER_KEY = app.config["TWITTER_CONSUMER_KEY"]
CONSUMER_SECRET = app.config["TWITTER_CONSUMER_SECRET"]
ACCESS_TOKEN = app.config["TWITTER_ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = app.config["TWITTER_ACCESS_TOKEN_SECRET"]
TWITTER_SCREEN_NAME = app.config["TWITTER_SCREEN_NAME"]
DARKSKY_KEY = app.config["DARKSKY_KEY"]
# These config variables come from 'config.py'
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
ids = api.friends_ids(screen_name=TWITTER_SCREEN_NAME, stringify_ids="true")
try:
dc = forecast(DARKSKY_KEY, 38.9159, -77.0446)
        except Exception:
            print("failed connection to darksky")
@app.route("/", methods=["GET"])
def index():
global thread
global thread2
if thread is None:
thread = Thread(target=twitter_thread, daemon=True)
thread.start()
if thread2 is None:
thread2 = Thread(target=darksky_thread, daemon=True)
thread2.start()
return render_template("index.html")
def twitter_thread():
"""connect to twitter sreaming API and send data to client"""
stream = Stream(auth, listener)
        _follow = ["15736341", "1"]  # note: unused; stream.filter below follows `ids`
stream.filter(follow=ids, filter_level="low")
def darksky_thread():
while True:
try:
dc.refresh(extend='daily')
sunrise = convert_unix_ts(dc['daily']['data'][0]['sunriseTime'])
sunset = convert_unix_ts(dc['daily']['data'][0]['sunsetTime'])
                # convert to int for a nice round whole-number temperature
temp = int(dc.temperature)
            except Exception:
                print("darksky refresh failed; using placeholders")
                sunrise = "-"
                sunset = "-"
                temp = "Connection Lost"
socketio.emit(
"darksky_channel",
{"temp": temp,
"sunrise": sunrise,
"sunset": sunset},
namespace="/darksky_streaming",
)
time.sleep(120)
listener = StdOutListener()
return app
def convert_unix_ts(ts):
    ts = int(ts)
return datetime.fromtimestamp(ts).strftime('%-I:%M')
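
# A minimal sketch of how this factory is typically launched (hypothetical
# run.py; assumes the package is importable as `edwin`):
#
#     from edwin import create_app, socketio
#
#     app = create_app()
#     socketio.run(app, host="127.0.0.1", port=5000)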
Stats: avg_line_length 30.39 | max_line_length 84 | alphanum_fraction 0.574 | classes 0 (score 0) | generators 0 (0) | decorators 418 (score 0.123) | async functions 0 (0) | documentation 565 (score 0.166)
=== troop/admin.py ===
Repo: packmas13/registration @ bfb42c5479d59494b59e7c656cb04826e110e8d2 | License: MIT | Size: 2,255 bytes (Python)
hexsha: 559ae7307b62942efd1983a817dbb736879880c0 | Stars: 1 (2020-08-12) | Issues: 46 (2020-01-24 to 2022-03-29) | Forks: 1 (2020-01-28)
from django import forms
from django.contrib import admin
from .models import Attendance, Diet, Participant, Troop
from payment.admin import DiscountInline, PaymentInline
class AttendanceInline(admin.TabularInline):
model = Participant.attendance.through
readonly_fields = ("participant",)
can_delete = False
def has_add_permission(self, request, obj=None):
return False
class AttendanceAdmin(admin.ModelAdmin):
inlines = [
AttendanceInline,
]
list_display = (
"date",
"is_main",
)
class DietInline(admin.TabularInline):
model = Participant.diet.through
readonly_fields = ("participant",)
can_delete = False
def has_add_permission(self, request, obj=None):
return False
class DietAdmin(admin.ModelAdmin):
inlines = [
DietInline,
]
class ParticipantAdmin(admin.ModelAdmin):
inlines = [
DiscountInline,
]
list_display = (
"troop",
"first_name",
"last_name",
"birthday",
"age_section",
"is_leader",
)
list_display_links = (
"first_name",
"last_name",
"birthday",
)
def formfield_for_dbfield(self, db_field, **kwargs):
formfield = super(ParticipantAdmin, self).formfield_for_dbfield(
db_field, **kwargs
)
if db_field.name == "comment":
formfield.widget = forms.Textarea(attrs=formfield.widget.attrs)
return formfield
class ParticipantInline(admin.TabularInline):
model = Participant
fields = (
"first_name",
"last_name",
"birthday",
)
readonly_fields = (
"first_name",
"last_name",
"birthday",
)
can_delete = False
show_change_link = True
def has_add_permission(self, request, obj=None):
return False
class TroopAdmin(admin.ModelAdmin):
inlines = [
ParticipantInline,
PaymentInline,
]
list_display = (
"number",
"name",
)
list_display_links = ("name",)
admin.site.register(Attendance, AttendanceAdmin)
admin.site.register(Diet, DietAdmin)
admin.site.register(Participant, ParticipantAdmin)
admin.site.register(Troop, TroopAdmin)
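
# For reference, each binding above can equivalently be written with
# Django's @admin.register decorator at the class definition (sketch only,
# not part of the original file):
#
#     @admin.register(Troop)
#     class TroopAdmin(admin.ModelAdmin):
#         ...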
Stats: avg_line_length 21.48 | max_line_length 75 | alphanum_fraction 0.632 | classes 1,883 (score 0.835) | generators 0 (0) | decorators 0 (0) | async functions 0 (0) | documentation 233 (score 0.103)
=== {{cookiecutter.project_name}}/service/worker/beat.py ===
Repo: ProjectTemplates/python-backend-service (also listed as ProjectTemplates/python-fastapi-backend) @ 5266916e54faaf236bc972a2cd7bb1217e8a8625 | License: MIT | Size: 142 bytes (Python)
hexsha: 559af5721a6a15c927e5d10a7e185b857bbef70d | Stars: 7 (2020-07-28 to 2021-12-11) | Forks: 1 (2020-05-10)
from conf import celery_settings
from .app import app
@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
pass
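
# The hook above is left as a stub by the template. A sketch of what it
# could register, using Celery's documented add_periodic_task API;
# `sample_task` is a hypothetical task defined on the same app:
#
#     @app.on_after_configure.connect
#     def setup_periodic_tasks(sender, **kwargs):
#         sender.add_periodic_task(60.0, sample_task.s(),
#                                  name="run sample_task every minute")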
Stats: avg_line_length 15.78 | max_line_length 43 | alphanum_fraction 0.789 | classes 0 (score 0) | generators 0 (0) | decorators 84 (score 0.592) | async functions 0 (0) | documentation 0 (0)
=== demo_snippets/11_Datenvisualisierung/main.py ===
Repo: fabod/pro2 @ 69b1015fa789ef05bf9b514d94b231f76bdf5e29 | License: MIT | Size: 829 bytes (Python)
hexsha: 559b8b906411edd79ce8b01d4b0d9cdea4c7292c | Stars: 2 (2020-03-03 to 2020-03-20) | Issues/Forks: none recorded
from flask import Flask
from flask import render_template
import plotly.express as px
from plotly.offline import plot
app = Flask("Datenvisualisierung")
def data():
data = px.data.gapminder()
data_ch = data[data.country == 'Switzerland']
return data_ch
def viz():
data_ch = data()
fig = px.bar(
data_ch,
x='year', y='pop',
hover_data=['lifeExp', 'gdpPercap'],
color='lifeExp',
labels={
'pop': 'Einwohner der Schweiz',
'year': 'Jahrzehnt'
},
height=400
)
div = plot(fig, output_type="div")
return div
@app.route("/")
def index():
div = viz()
# return str([str(i) for i in data()])
return render_template('index.html', viz_div=div)
if __name__ == '__main__':
app.run(debug=True, port=5000)
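
# Note: the div returned by viz() must be rendered unescaped by the (not
# shown) templates/index.html; with Jinja2 that is typically done as:
#
#     <div class="viz">{{ viz_div|safe }}</div>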
Stats: avg_line_length 18.42 | max_line_length 53 | alphanum_fraction 0.587 | classes 0 (score 0) | generators 0 (0) | decorators 141 (score 0.170) | async functions 0 (0) | documentation 187 (score 0.226)
=== sdk/python/pulumi_digitalocean/database_connection_pool.py ===
Repo: mikealgj/pulumi-digitalocean @ 77c109ab364eb69b7668b007c29413f5d2c95209 | Licenses: ECL-2.0, Apache-2.0 | Size: 24,336 bytes (Python)
hexsha: 559bff5f8a9189b7032f820f194b11e430ff84ea | Stars: 53 (2019-04-25 to 2022-03-14) | Issues: 158 (2019-04-15 to 2022-03-29) | Forks: 10 (2019-04-15 to 2021-05-28)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['DatabaseConnectionPoolArgs', 'DatabaseConnectionPool']
@pulumi.input_type
class DatabaseConnectionPoolArgs:
def __init__(__self__, *,
cluster_id: pulumi.Input[str],
db_name: pulumi.Input[str],
mode: pulumi.Input[str],
size: pulumi.Input[int],
user: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a DatabaseConnectionPool resource.
:param pulumi.Input[str] cluster_id: The ID of the source database cluster. Note: This must be a PostgreSQL cluster.
:param pulumi.Input[str] db_name: The database for use with the connection pool.
:param pulumi.Input[str] mode: The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement.
:param pulumi.Input[int] size: The desired size of the PGBouncer connection pool.
:param pulumi.Input[str] user: The name of the database user for use with the connection pool.
:param pulumi.Input[str] name: The name for the database connection pool.
"""
pulumi.set(__self__, "cluster_id", cluster_id)
pulumi.set(__self__, "db_name", db_name)
pulumi.set(__self__, "mode", mode)
pulumi.set(__self__, "size", size)
pulumi.set(__self__, "user", user)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> pulumi.Input[str]:
"""
The ID of the source database cluster. Note: This must be a PostgreSQL cluster.
"""
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="dbName")
def db_name(self) -> pulumi.Input[str]:
"""
The database for use with the connection pool.
"""
return pulumi.get(self, "db_name")
@db_name.setter
def db_name(self, value: pulumi.Input[str]):
pulumi.set(self, "db_name", value)
@property
@pulumi.getter
def mode(self) -> pulumi.Input[str]:
"""
The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: pulumi.Input[str]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def size(self) -> pulumi.Input[int]:
"""
The desired size of the PGBouncer connection pool.
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: pulumi.Input[int]):
pulumi.set(self, "size", value)
@property
@pulumi.getter
def user(self) -> pulumi.Input[str]:
"""
The name of the database user for use with the connection pool.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: pulumi.Input[str]):
pulumi.set(self, "user", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name for the database connection pool.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _DatabaseConnectionPoolState:
def __init__(__self__, *,
cluster_id: Optional[pulumi.Input[str]] = None,
db_name: Optional[pulumi.Input[str]] = None,
host: Optional[pulumi.Input[str]] = None,
mode: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
private_host: Optional[pulumi.Input[str]] = None,
private_uri: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[int]] = None,
uri: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering DatabaseConnectionPool resources.
:param pulumi.Input[str] cluster_id: The ID of the source database cluster. Note: This must be a PostgreSQL cluster.
:param pulumi.Input[str] db_name: The database for use with the connection pool.
:param pulumi.Input[str] host: The hostname used to connect to the database connection pool.
:param pulumi.Input[str] mode: The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement.
:param pulumi.Input[str] name: The name for the database connection pool.
:param pulumi.Input[str] password: Password for the connection pool's user.
:param pulumi.Input[int] port: Network port that the database connection pool is listening on.
:param pulumi.Input[str] private_host: Same as `host`, but only accessible from resources within the account and in the same region.
:param pulumi.Input[str] private_uri: Same as `uri`, but only accessible from resources within the account and in the same region.
:param pulumi.Input[int] size: The desired size of the PGBouncer connection pool.
:param pulumi.Input[str] uri: The full URI for connecting to the database connection pool.
:param pulumi.Input[str] user: The name of the database user for use with the connection pool.
"""
if cluster_id is not None:
pulumi.set(__self__, "cluster_id", cluster_id)
if db_name is not None:
pulumi.set(__self__, "db_name", db_name)
if host is not None:
pulumi.set(__self__, "host", host)
if mode is not None:
pulumi.set(__self__, "mode", mode)
if name is not None:
pulumi.set(__self__, "name", name)
if password is not None:
pulumi.set(__self__, "password", password)
if port is not None:
pulumi.set(__self__, "port", port)
if private_host is not None:
pulumi.set(__self__, "private_host", private_host)
if private_uri is not None:
pulumi.set(__self__, "private_uri", private_uri)
if size is not None:
pulumi.set(__self__, "size", size)
if uri is not None:
pulumi.set(__self__, "uri", uri)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the source database cluster. Note: This must be a PostgreSQL cluster.
"""
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="dbName")
def db_name(self) -> Optional[pulumi.Input[str]]:
"""
The database for use with the connection pool.
"""
return pulumi.get(self, "db_name")
@db_name.setter
def db_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "db_name", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
The hostname used to connect to the database connection pool.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[str]]:
"""
The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name for the database connection pool.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
Password for the connection pool's user.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
Network port that the database connection pool is listening on.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="privateHost")
def private_host(self) -> Optional[pulumi.Input[str]]:
"""
Same as `host`, but only accessible from resources within the account and in the same region.
"""
return pulumi.get(self, "private_host")
@private_host.setter
def private_host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_host", value)
@property
@pulumi.getter(name="privateUri")
def private_uri(self) -> Optional[pulumi.Input[str]]:
"""
Same as `uri`, but only accessible from resources within the account and in the same region.
"""
return pulumi.get(self, "private_uri")
@private_uri.setter
def private_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_uri", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[int]]:
"""
The desired size of the PGBouncer connection pool.
"""
return pulumi.get(self, "size")
@size.setter
def size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "size", value)
@property
@pulumi.getter
def uri(self) -> Optional[pulumi.Input[str]]:
"""
The full URI for connecting to the database connection pool.
"""
return pulumi.get(self, "uri")
@uri.setter
def uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uri", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
The name of the database user for use with the connection pool.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
class DatabaseConnectionPool(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
db_name: Optional[pulumi.Input[str]] = None,
mode: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[int]] = None,
user: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a DigitalOcean database connection pool resource.
## Example Usage
### Create a new PostgreSQL database connection pool
```python
import pulumi
import pulumi_digitalocean as digitalocean
postgres_example = digitalocean.DatabaseCluster("postgres-example",
engine="pg",
version="11",
size="db-s-1vcpu-1gb",
region="nyc1",
node_count=1)
pool_01 = digitalocean.DatabaseConnectionPool("pool-01",
cluster_id=postgres_example.id,
mode="transaction",
size=20,
db_name="defaultdb",
user="doadmin")
```
## Import
Database connection pools can be imported using the `id` of the source database cluster and the `name` of the connection pool joined with a comma. For example
```sh
$ pulumi import digitalocean:index/databaseConnectionPool:DatabaseConnectionPool pool-01 245bcfd0-7f31-4ce6-a2bc-475a116cca97,pool-01
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_id: The ID of the source database cluster. Note: This must be a PostgreSQL cluster.
:param pulumi.Input[str] db_name: The database for use with the connection pool.
:param pulumi.Input[str] mode: The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement.
:param pulumi.Input[str] name: The name for the database connection pool.
:param pulumi.Input[int] size: The desired size of the PGBouncer connection pool.
:param pulumi.Input[str] user: The name of the database user for use with the connection pool.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DatabaseConnectionPoolArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a DigitalOcean database connection pool resource.
## Example Usage
### Create a new PostgreSQL database connection pool
```python
import pulumi
import pulumi_digitalocean as digitalocean
postgres_example = digitalocean.DatabaseCluster("postgres-example",
engine="pg",
version="11",
size="db-s-1vcpu-1gb",
region="nyc1",
node_count=1)
pool_01 = digitalocean.DatabaseConnectionPool("pool-01",
cluster_id=postgres_example.id,
mode="transaction",
size=20,
db_name="defaultdb",
user="doadmin")
```
## Import
Database connection pools can be imported using the `id` of the source database cluster and the `name` of the connection pool joined with a comma. For example
```sh
$ pulumi import digitalocean:index/databaseConnectionPool:DatabaseConnectionPool pool-01 245bcfd0-7f31-4ce6-a2bc-475a116cca97,pool-01
```
:param str resource_name: The name of the resource.
:param DatabaseConnectionPoolArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DatabaseConnectionPoolArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
db_name: Optional[pulumi.Input[str]] = None,
mode: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[int]] = None,
user: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DatabaseConnectionPoolArgs.__new__(DatabaseConnectionPoolArgs)
if cluster_id is None and not opts.urn:
raise TypeError("Missing required property 'cluster_id'")
__props__.__dict__["cluster_id"] = cluster_id
if db_name is None and not opts.urn:
raise TypeError("Missing required property 'db_name'")
__props__.__dict__["db_name"] = db_name
if mode is None and not opts.urn:
raise TypeError("Missing required property 'mode'")
__props__.__dict__["mode"] = mode
__props__.__dict__["name"] = name
if size is None and not opts.urn:
raise TypeError("Missing required property 'size'")
__props__.__dict__["size"] = size
if user is None and not opts.urn:
raise TypeError("Missing required property 'user'")
__props__.__dict__["user"] = user
__props__.__dict__["host"] = None
__props__.__dict__["password"] = None
__props__.__dict__["port"] = None
__props__.__dict__["private_host"] = None
__props__.__dict__["private_uri"] = None
__props__.__dict__["uri"] = None
super(DatabaseConnectionPool, __self__).__init__(
'digitalocean:index/databaseConnectionPool:DatabaseConnectionPool',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
db_name: Optional[pulumi.Input[str]] = None,
host: Optional[pulumi.Input[str]] = None,
mode: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
private_host: Optional[pulumi.Input[str]] = None,
private_uri: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[int]] = None,
uri: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None) -> 'DatabaseConnectionPool':
"""
Get an existing DatabaseConnectionPool resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_id: The ID of the source database cluster. Note: This must be a PostgreSQL cluster.
:param pulumi.Input[str] db_name: The database for use with the connection pool.
:param pulumi.Input[str] host: The hostname used to connect to the database connection pool.
:param pulumi.Input[str] mode: The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement.
:param pulumi.Input[str] name: The name for the database connection pool.
:param pulumi.Input[str] password: Password for the connection pool's user.
:param pulumi.Input[int] port: Network port that the database connection pool is listening on.
:param pulumi.Input[str] private_host: Same as `host`, but only accessible from resources within the account and in the same region.
:param pulumi.Input[str] private_uri: Same as `uri`, but only accessible from resources within the account and in the same region.
:param pulumi.Input[int] size: The desired size of the PGBouncer connection pool.
:param pulumi.Input[str] uri: The full URI for connecting to the database connection pool.
:param pulumi.Input[str] user: The name of the database user for use with the connection pool.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DatabaseConnectionPoolState.__new__(_DatabaseConnectionPoolState)
__props__.__dict__["cluster_id"] = cluster_id
__props__.__dict__["db_name"] = db_name
__props__.__dict__["host"] = host
__props__.__dict__["mode"] = mode
__props__.__dict__["name"] = name
__props__.__dict__["password"] = password
__props__.__dict__["port"] = port
__props__.__dict__["private_host"] = private_host
__props__.__dict__["private_uri"] = private_uri
__props__.__dict__["size"] = size
__props__.__dict__["uri"] = uri
__props__.__dict__["user"] = user
return DatabaseConnectionPool(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> pulumi.Output[str]:
"""
The ID of the source database cluster. Note: This must be a PostgreSQL cluster.
"""
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter(name="dbName")
def db_name(self) -> pulumi.Output[str]:
"""
The database for use with the connection pool.
"""
return pulumi.get(self, "db_name")
@property
@pulumi.getter
def host(self) -> pulumi.Output[str]:
"""
The hostname used to connect to the database connection pool.
"""
return pulumi.get(self, "host")
@property
@pulumi.getter
def mode(self) -> pulumi.Output[str]:
"""
The PGBouncer transaction mode for the connection pool. The allowed values are session, transaction, and statement.
"""
return pulumi.get(self, "mode")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name for the database connection pool.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def password(self) -> pulumi.Output[str]:
"""
Password for the connection pool's user.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter
def port(self) -> pulumi.Output[int]:
"""
Network port that the database connection pool is listening on.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="privateHost")
def private_host(self) -> pulumi.Output[str]:
"""
Same as `host`, but only accessible from resources within the account and in the same region.
"""
return pulumi.get(self, "private_host")
@property
@pulumi.getter(name="privateUri")
def private_uri(self) -> pulumi.Output[str]:
"""
Same as `uri`, but only accessible from resources within the account and in the same region.
"""
return pulumi.get(self, "private_uri")
@property
@pulumi.getter
def size(self) -> pulumi.Output[int]:
"""
The desired size of the PGBouncer connection pool.
"""
return pulumi.get(self, "size")
@property
@pulumi.getter
def uri(self) -> pulumi.Output[str]:
"""
The full URI for connecting to the database connection pool.
"""
return pulumi.get(self, "uri")
@property
@pulumi.getter
def user(self) -> pulumi.Output[str]:
"""
The name of the database user for use with the connection pool.
"""
return pulumi.get(self, "user")
Stats: avg_line_length 39.76 | max_line_length 166 | alphanum_fraction 0.624 | classes 23,890 (score 0.982) | generators 0 (0) | decorators 20,883 (score 0.858) | async functions 0 (0) | documentation 11,248 (score 0.462)
=== data_analysis/get_model_statistics.py ===
Repo: fluTN/influenza @ 40cbede52bc4e95d52369eebe4a50ad4b71369d1 | License: MIT | Size: 2,940 bytes (Python)
hexsha: 559c155e6e0b7efb591c20bbc5e5237149bd61eb | Stars: 1 (2020-10-29) | Forks: 1 (2022-01-22)
# -*- coding: utf-8 -*-
"""Script which can be used to compare the features obtained of two different influenza models
Usage:
get_model_statistics.py <model> [--country=<country_name>] [--no-future] [--basedir=<directory>] [--start-year=<start_year>] [--end-year=<end_year>] [--save] [--no-graph]
<baseline> Data file of the first model
<other_method> Data file of the second model
-h, --help Print this help message
"""
import pandas as pd
import numpy as np
from scipy import stats
from docopt import docopt
import os
import glob
from sklearn.metrics import mean_squared_error
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
def get_results_filename(basepath):
files = [f for f in glob.glob(basepath + "/*-prediction.csv", recursive=True)]
y = os.path.basename(files[0]).split("-")[0]
y2 = os.path.basename(files[0]).split("-")[1]
return "{}-{}".format(y, y2)
if __name__ == "__main__":
args = docopt(__doc__)
model = args["<model>"]
base_dir = args["--basedir"] if args["--basedir"] else "../complete_results"
country = args["--country"] if args["--country"] else "italy"
future = "no-future" if args["--no-future"] else "future"
# Read the baseline results and merge them
    model_path = os.path.join(base_dir, model, future, country)
season_years = get_results_filename(model_path)
model_file = os.path.join(model_path, "{}-prediction.csv".format(season_years))
# Load the data
data = pd.read_csv(model_file)
# Get only the weeks we care for
start_year = "2007-42" if not args["--start-year"] else args["--start-year"]
end_year = "2019-15" if not args["--end-year"] else args["--end-year"]
start_season = data["week"] >= start_year
end_season = data["week"] <= str(int(end_year.split("-")[0]) + 1) + "-" + end_year.split("-")[1]
total = start_season & end_season
data = data[total]
# Describe the data
print("")
print("[*] Describe the given dataset {}".format(model_file))
print(data.describe())
# Generate residuals
print("")
print("[*] Describe the residuals")
residuals = data["incidence"]-data["prediction"]
print(residuals.describe())
# Get some statistics
print("")
total_pearson = 0
for i in np.arange(0, len(data["prediction"]), 26):
total_pearson += stats.pearsonr(data["prediction"][i:i+26], data["incidence"][i:i+26])[0]
print("Pearson Correlation (value/p): ", total_pearson/(len(data["prediction"])/26))
print("")
print("Mean Squared Error: ", mean_squared_error(data["prediction"], data["incidence"]))
print("")
if not args["--no-graph"]:
ax = sns.distplot(residuals, label="Residual")
plt.figure()
ax = sns.distplot(data["incidence"], label="Incidence")
ax = sns.distplot(data["prediction"], label="Prediction")
plt.legend()
plt.show()
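
# Example invocation (hypothetical model name; paths follow the --basedir
# default used above):
#
#     python get_model_statistics.py new_model --country=italy \
#         --basedir=../complete_results --start-year=2010-42 --end-year=2015-15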
Stats: avg_line_length 33.41 | max_line_length 172 | alphanum_fraction 0.645 | classes 0 (score 0) | generators 0 (0) | decorators 0 (0) | async functions 0 (0) | documentation 1,157 (score 0.394)
=== sisu/summarizer.py ===
Repo: balouf/sisu @ 07541e6a02e545372452b33f7df056331397001f | License: BSD-3-Clause | Size: 21,887 bytes (Python)
hexsha: 559f3ab5a294666e58af2d7a21dc2e34d7f16b41 | Stars/Issues/Forks: none recorded
from scipy.sparse import vstack
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from sisu.preprocessing.tokenizer import is_relevant_sentence, make_sentences, sanitize_text
from gismo.gismo import Gismo, covering_order
from gismo.common import auto_k
from gismo.parameters import Parameters
from gismo.corpus import Corpus
from gismo.embedding import Embedding
from sisu.embedding_idf import IdfEmbedding
def cosine_order(projection, sentences, query):
"""
Order relevant sentences by cosine similarity to the query.
Parameters
----------
projection: callable
A function that converts a text into a tuple whose first element is an embedding (typically a Gismo :meth:`~gismo.embedding.Embedding.query_projection`).
sentences: :class:`list` of :class:`dict`
Sentences as output by :func:`~sisu.summarizer.extract_sentences`.
query: :class:`str`
Target query
Returns
-------
:class:`list` of :class:`int`
Ordered list of indexes of relevant sentences, sorted by cosine similarity
"""
relevant_indices = [s['index'] for s in sentences if s['relevant']]
projected_query = projection(query)[0]
projected_sentences = vstack([projection(sentences[i]['sanitized'])[0] for i in relevant_indices])
order = np.argsort(- cosine_similarity(projected_sentences, projected_query)[:, 0])
return [relevant_indices[i] for i in order]
def extract_sentences(source, indices, getter=None, tester=None):
"""
Pick up the entries of the source corresponding to indices and build a list of sentences out of that.
Each sentence is a dictionary with the following keys:
- `index`: position of the sentence in the returned list
- `sentence`: the actual sentence
- `relevant`: a boolean that tells if the sentence is eligible for being part of the summary
- `sanitized`: for relevant sentences, a simplified version to be fed to the embedding
Parameters
----------
source: :class:`list`
list of objects
indices: iterable of :class:`int`
Indexes of the source items to select
getter: callable, optional
Tells how to convert a source entry into text.
tester: callable, optional
Tells if the sentence is eligible for being part of the summary.
Returns
-------
list of dict
Examples
--------
>>> doc1 = ("This is a short sentence! This is a sentence with reference to the url http://www.ix.com! "
... "This sentence is not too short and not too long, without URL and without citation. "
... "I have many things to say in that sentence, to the point "
... "I do not know if I will stop anytime soon but don\'t let it stop "
... "you from reading this meaninless garbage and this goes on and "
... "this goes on and this goes on and this goes on and this goes on and "
... "this goes on and this goes on and this goes on and this goes on "
... "and this goes on and this goes on and this goes on and this goes "
... "on and this goes on and this goes on and this goes on and this goes "
... "on and this goes on and that is all.")
>>> doc2 = ("This is a a sentence with some citations [3, 7]. "
... "This sentence is not too short and not too long, without URL and without citation. "
... "Note that the previous sentence is already present in doc1. "
... "The enzyme cytidine monophospho-N-acetylneuraminic acid hydroxylase (CMAH) catalyzes "
... "the synthesis of Neu5Gc by hydroxylation of Neu5Ac (Schauer et al. 1968).")
>>> extract_sentences([doc1, doc2], [1, 0]) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
[{'index': 0, 'sentence': 'This is a a sentence with some citations [3, 7].', 'relevant': False, 'sanitized': ''},
{'index': 1, 'sentence': 'This sentence is not too short and not too long, without URL and without citation.',
'relevant': True, 'sanitized': 'This sentence is not too short and not too long without URL and without citation'},
{'index': 2, 'sentence': 'Note that the previous sentence is already present in doc1.',
'relevant': True, 'sanitized': 'Note that the previous sentence is already present in doc'},
{'index': 3, 'sentence': 'The enzyme cytidine monophospho-N-acetylneuraminic acid hydroxylase (CMAH) catalyzes
the synthesis of Neu5Gc by hydroxylation of Neu5Ac (Schauer et al. 1968).',
'relevant': False, 'sanitized': ''},
{'index': 4, 'sentence': 'This is a short sentence!', 'relevant': False, 'sanitized': ''},
{'index': 5, 'sentence': 'This is a sentence with reference to the url http://www.ix.com!',
'relevant': False, 'sanitized': ''},
{'index': 6, 'sentence': 'This sentence is not too short and not too long, without URL and without citation.',
'relevant': False, 'sanitized': ''},
{'index': 7, 'sentence': "I have many things to say in that sentence...",
'relevant': False, 'sanitized': ''}]
"""
if getter is None:
getter = str
if tester is None:
tester = is_relevant_sentence
sentences = [{'index': i, 'sentence': sent, 'relevant': tester(sent)}
for i, sent in enumerate([sent for j in indices
for sent in make_sentences(getter(source[j]))])]
used = set()
for s in sentences:
if s['sentence'] in used and s['relevant']:
s['relevant'] = False
else:
used.add(s['sentence'])
s['sanitized'] = sanitize_text(s['sentence']) if s['relevant'] else ""
return sentences
default_summarizer_parameters = {
'order': 'rank',
'text_getter': None,
'sentence_tester': is_relevant_sentence,
'itf': True,
'post_processing': lambda summa, i: summa.sentences_[i]['sentence'],
'sentence_gismo_parameters': {'post': False, 'resolution': .99},
'num_documents': None,
'num_query': None,
'num_sentences': None,
'max_chars': None}
"""
List of parameters for the summarizer with their default values.
Parameters
-----------
order: :class:`str`
Sorting function.
text_getter: callable
Extraction of text from corpus item. If not specify, the to_text of the :class:`~gismo.corpus.Corpus` will be used.
sentence_tester: callable
Function that estimates if a sentence is eligible to be part of the summary
itf: :class:`bool`
Use of ITF normalization in the sentence-level Gismo
post_processing: callable
post_processing transformation. Signature is (:class:`~sisu.summarizer.Summarizer`, :class:`int`) -> :class:`str`
sentence_gismo_parameters: :class:`dict`
Tuning of sentence-level gismo. `post` MUST be set to False.
num_documents: :class:`int` or None
Number of documents to pre-select
num_query: :class:`int` or None
Number of features to use in generic query
num_sentences: :class:`int` or None
Number of sentences to return
max_chars: :class:`int` or None
Maximal number of characters to return
"""
class Summarizer:
"""
Summarizer class.
Parameters
----------
gismo: :class:`~gismo.gismo.Gismo`
Gismo of the documents to analyze.
kwargs: :class:`dict`
Parameters of the summarizer (see :obj:`~sisu.summarizer.default_summarizer_parameters` for details).
Attributes
----------
query_: :class:`str`
Query used to summarize.
sentences_: :class:`list` of :class:`dict`
Selected sentences. Each sentence is a dictionary with the following keys:
- `index`: position of the sentence in the returned list
- `sentence`: the actual sentence
- `relevant`: a boolean that tells if the sentence is eligible for being part of the summary
- `sanitized`: for relevant sentences, a simplified version to be fed to the embedding
order_: :class:`numpy.ndarray`
Proposed incomplete ordering of the :class:`~sisu.summarizer.Summarizer.sentences_`
sentence_gismo_: :class:`~gismo.gismo.Gismo`
Gismo running at sentence level.
parameters: :class:`~gismo.parameters.Parameters`
Handler of parameters.
Examples
--------
The package contains a data folder with a toy gismo with articles related to Covid-19. We load it.
>>> gismo = Gismo(filename="toy_gismo", path="data")
Then we build a summarizer out of it. We tell to fetch the sentences from the content of the articles.
>>> summa = Summarizer(gismo, text_getter = lambda d: d['content'])
Ask for a summary on *bat* with a maximal budget of 500 characters, using pure TF-IDF sentence embedding.
>>> summa('bat', max_chars=500, itf=False) # doctest: +NORMALIZE_WHITESPACE
['By comparing the amino acid sequence of 2019-nCoV S-protein (GenBank Accession: MN908947.3) with
Bat SARS-like coronavirus isolate bat-SL-CoVZC45 and Bat SARS-like coronavirus isolate Bat-SL-CoVZXC21,
the latter two were shown to share 89.1% and 88.6% sequence identity to 2019-nCoV S-protein
(supplementary figure 1) .',
'Within our bat-hemoplasma network, genotype sharing was restricted to five host communities,
380 whereas six genotypes were each restricted to a single bat species (Fig. 5A ).']
Now a summary based on the *cosine* ordering, using the content of abstracts and pure TF-IDF sentence embedding.
>>> summa('bat', max_chars=500, order='cosine', text_getter = lambda d: d['abstract']) # doctest: +NORMALIZE_WHITESPACE
['Bat dipeptidyl peptidase 4 (DPP4) sequences were closely related to 38 those of human and non-human
primates but distinct from dromedary DPP4 sequence.',
'The multiple sequence alignment data correlated with already published reports on SARS-CoV-2
indicated that it is closely related to Bat-Severe Acute Respiratory Syndrome like coronavirus
(Bat CoV SARS-like) and wellstudied Human SARS.',
'(i.e., hemoplasmas) across a species-rich 40 bat community in Belize over two years.']
Now 4 sentences using a *coverage* ordering.
>>> summa('bat', num_sentences=4, order='coverage') # doctest: +NORMALIZE_WHITESPACE
['By comparing the amino acid sequence of 2019-nCoV S-protein (GenBank Accession: MN908947.3)
with Bat SARS-like coronavirus isolate bat-SL-CoVZC45 and Bat SARS-like coronavirus isolate
Bat-SL-CoVZXC21, the latter two were shown to share 89.1% and 88.6% sequence identity
to 2019-nCoV S-protein (supplementary figure 1) .',
'However, we have not done the IDPs analysis for ORF10 from the Bat-SL-CoVZC45 strain since we
have taken different strain of Bat CoV (reviewed strain HKU3-1) in our study.',
'To test the dependence of the hemoplasma 290 phylogeny upon the bat phylogeny and thus assess
evidence of evolutionary codivergence, we 291 applied the Procrustes Approach to Cophylogeny
(PACo) using distance matrices and the paco 292 We used hemoplasma genotype assignments to
create a network, with each node representing a 299 bat species and edges representing shared
genotypes among bat species pairs.',
'However, these phylogenetic patterns in prevalence were decoupled from those describing bat
526 species centrality in sharing hemoplasmas, such that genotype sharing was generally
restricted 527 by bat phylogeny.']
As you can see, there are some ``However, '' in the answers.
A bit of NLP post_processing can take care of those.
>>> import spacy
>>> nlp = spacy.load("en_core_web_sm")
>>> post_nlp = PostNLP(nlp)
>>> summa('bat', num_sentences=4, order='coverage', post_processing=post_nlp) # doctest: +NORMALIZE_WHITESPACE
['By comparing the amino acid sequence of 2019-nCoV S-protein (GenBank Accession: MN908947.3)
with Bat SARS-like coronavirus isolate bat-SL-CoVZC45 and Bat SARS-like coronavirus isolate
Bat-SL-CoVZXC21, the latter two were shown to share 89.1% and 88.6% sequence identity
to 2019-nCoV S-protein (supplementary figure 1) .',
'We have not done the IDPs analysis for ORF10 from the Bat-SL-CoVZC45 strain since we
have taken different strain of Bat CoV (reviewed strain HKU3-1) in our study.',
'To test the dependence of the hemoplasma 290 phylogeny upon the bat phylogeny and thus assess
evidence of evolutionary codivergence, we 291 applied the Procrustes Approach to Cophylogeny
(PACo) using distance matrices and the paco 292 We used hemoplasma genotype assignments to
create a network, with each node representing a 299 bat species and edges representing shared
genotypes among bat species pairs.',
'These phylogenetic patterns in prevalence were decoupled from those describing bat
526 species centrality in sharing hemoplasmas, such that genotype sharing was generally
restricted 527 by bat phylogeny.']
"""
def __init__(self, gismo, **kwargs):
self.gismo = gismo
self.query_ = None
self.sentences_ = None
self.order_ = None
self.sentence_gismo_ = None
self.parameters = Parameters(parameter_list=default_summarizer_parameters, **kwargs)
if self.parameters.text_getter is None:
self.parameters.text_getter = self.gismo.corpus.to_text
def rank_documents(self, query, num_query=None):
"""
Perform a Gismo query at document-level. If the query fails, builds a generic query instead.
The :attr:`~sisu.summarizer.Summarizer.gismo` and
:attr:`~sisu.summarizer.Summarizer.query_` attributes are updated.
Parameters
----------
query: :class:`str`
Input text
num_query: :class:`int`
Number of words of the generic query, is any
Returns
-------
None
"""
if num_query is None:
num_query = self.parameters.num_query
success = self.gismo.rank(query)
if success:
self.query_ = query
else:
self.query_ = " ".join(self.gismo.get_features_by_rank(k=num_query))
self.gismo.rank(self.query_)
def build_sentence_source(self, num_documents=None, getter=None, tester=None):
"""
Creates the corpus of sentences (:attr:`~sisu.summarizer.Summarizer.sentences_`)
Parameters
----------
num_documents: :class:`int`, optional
Number of documents to select (if not, Gismo will automatically decide).
getter: callable
Extraction of text from corpus item. If not specify, the to_text of the :class:`~gismo.corpus.Corpus` will be used.
tester: callable
Function that estimates if a sentence is eligible to be part of the summary.
Returns
-------
None
"""
if num_documents is None:
num_documents = self.parameters.num_documents
if getter is None:
getter = self.parameters.text_getter
if tester is None:
tester = self.parameters.sentence_tester
self.sentences_ = extract_sentences(source=self.gismo.corpus,
indices=self.gismo.get_documents_by_rank(k=num_documents,
post=False),
getter=getter,
tester=tester)
def build_sentence_gismo(self, itf=None, s_g_p=None):
"""
Creates the Gismo of sentences (:attr:`~sisu.summarizer.Summarizer.sentence_gismo_`)
Parameters
----------
itf: :class:`bool`, optional
Applies TF-IDTF embedding. I False, TF-IDF embedding is used.
s_g_p: :class:`dict`
Parameters for the sentence Gismo.
Returns
-------
None
"""
if itf is None:
itf = self.parameters.itf
if s_g_p is None:
s_g_p = self.parameters.sentence_gismo_parameters
sentence_corpus = Corpus(source=self.sentences_, to_text=lambda s: s['sanitized'])
sentence_embedding = Embedding() if itf else IdfEmbedding()
sentence_embedding.fit_ext(embedding=self.gismo.embedding)
sentence_embedding.transform(sentence_corpus)
self.sentence_gismo_ = Gismo(sentence_corpus, sentence_embedding, **s_g_p)
def build_coverage_order(self, k):
"""
Populate :attr:`~sisu.summarizer.Summarizer.order_` with a covering order with
target number of sentences *k*. The actual number of indices is stretched
by the sentence Gismo stretch factor.
Parameters
----------
k: :class:`int`
Number of optimal covering sentences.
Returns
-------
:class:`numpy.ndarray`
Covering order.
"""
p = self.sentence_gismo_.parameters(post=False)
cluster = self.sentence_gismo_.get_documents_by_cluster(k=int(k * p['stretch']), **p)
return covering_order(cluster, wide=p['wide'])
def summarize(self, query="", **kwargs):
"""
Performs a full run of all summary-related operations:
- Rank a query at document level, fallback to a generic query if the query fails;
- Extract sentences from the top documents
- Order sentences by one of the three methods proposed, *rank*, *coverage*, and *cosine*
- Apply post-processing and return list of selected sentences.
Note that calling a :class:`~sisu.summarizer.Summarizer` will call its
:meth:`~sisu.summarizer.Summarizer.summarize` method.
Parameters
----------
query: :class:`str`
Query to run.
kwargs: :class:`dict`
Runtime specific parameters
(see :obj:`~sisu.summarizer.default_summarizer_parameters` for possible arguments).
Returns
-------
:class:`list` of :class:`str`
Summary.
"""
# Instantiate parameters for the call
p = self.parameters(**kwargs)
# Perform query, fallback to generic query in case of failure
self.rank_documents(query=query, num_query=p['num_query'])
# Extract and preprocess sentences
self.build_sentence_source(num_documents=p['num_documents'], getter=p['text_getter'],
tester=p['sentence_tester'])
# Order sentences
if p['order'] == 'cosine':
self.order_ = cosine_order(self.gismo.embedding.query_projection, self.sentences_, self.query_)
elif p['order'] in {'rank', 'coverage'}:
self.build_sentence_gismo(itf=p['itf'], s_g_p=p['sentence_gismo_parameters'])
self.sentence_gismo_.rank(query)
if p['num_sentences'] is None:
p['num_sentences'] = auto_k(data=self.sentence_gismo_.diteration.x_relevance,
order=self.sentence_gismo_.diteration.x_order,
max_k=self.sentence_gismo_.parameters.max_k,
target=self.sentence_gismo_.parameters.target_k)
if p['order'] == 'rank':
self.order_ = self.sentence_gismo_.diteration.x_order
else:
self.order_ = self.build_coverage_order(p['num_sentences'])
if p['max_chars'] is None:
results = [p['post_processing'](self, i) for i in self.order_[:p['num_sentences']]]
return [txt for txt in results if len(txt)>0]
else:
results = []
length = 0
# Maximal number of sentences that will be processed
max_sentences = int(p['max_chars']/50)
for i in self.order_[:max_sentences]:
txt = p['post_processing'](self, i)
l = len(txt)
if l>0 and length+l < p['max_chars']:
results.append(txt)
length += l
if length > .98*p['max_chars']:
break
return results
def __call__(self, query="", **kwargs):
return self.summarize(query, **kwargs)
class PostNLP:
"""
Post-processor for the :class:`~sisu.summarizer.Summarizer` that leverages a spacy NLP engine.
- Discard sentences with no verb.
- Remove adverbs and punctuations that starts a sentence (e.g. "However, we ..." -> "We ...").
- Optionally, if the engine supports co-references, resolve them.
Parameters
----------
nlp: callable
A Spacy nlp engine.
coref: :class:`bool`
Resolve co-references if the nlp engine supports it.
"""
def __init__(self, nlp, coref=False):
self.nlp = nlp
self.coref = coref
def __call__(self, summa, i):
nlp_sent = self.nlp(summa.sentences_[i]['sentence'])
tags = {token.tag_ for token in nlp_sent}
if not any([t.startswith("VB") for t in tags]):
summa.sentences_[i]['relevant'] = False
return ""
        while len(nlp_sent) > 0 and nlp_sent[0].pos_ == "ADV":
            nlp_sent = nlp_sent[1:]
            if len(nlp_sent) > 0 and nlp_sent[0].pos_ == "PUNCT":
                nlp_sent = nlp_sent[1:]
txt = nlp_sent.text
summa.sentences_[i]['sentence'] = f"{txt[0].upper()}{txt[1:]}"
if "PRP" in tags and self.coref and hasattr(nlp_sent._, 'has_coref'):
extract_str = " ".join([s['sentence'] for s in summa.sentences_[max(0, i - 2) : i + 1]])
extract = self.nlp(extract_str)
if extract._.has_coref:
resolved_extract = extract._.coref_resolved
summa.sentences_[i]['sentence'] = make_sentences(resolved_extract)[-1]
return summa.sentences_[i]['sentence']
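# Minimal usage sketch (not part of the original module). It assumes a
# document-level `gismo` object has been built elsewhere and that spacy's
# small English model is installed; the Summarizer constructor signature
# shown here is an assumption, not something confirmed by this file.
#
#     import spacy
#     nlp_engine = spacy.load("en_core_web_sm")
#     summarizer = Summarizer(gismo)
#     summary = summarizer("my query",
#                          order="coverage",
#                          post_processing=PostNLP(nlp_engine, coref=False))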
| 46.077895 | 161 | 0.646 | 14,742 | 0.673551 | 0 | 0 | 0 | 0 | 0 | 0 | 15,225 | 0.695618 |
559fa91e2cb3fcb7a60d3f0698d9ba9ef4cfe606
| 4,482 |
py
|
Python
|
automr/bridge.py
|
hebrewsnabla/pyAutoMR
|
8e81ed7fd780abd94f8b51e48ee4b980a868c204
|
[
"Apache-2.0"
] | 5 |
2021-06-03T07:49:02.000Z
|
2022-02-21T11:35:20.000Z
|
automr/bridge.py
|
hebrewsnabla/pyAutoMR
|
8e81ed7fd780abd94f8b51e48ee4b980a868c204
|
[
"Apache-2.0"
] | 2 |
2022-01-20T08:33:59.000Z
|
2022-03-26T12:21:15.000Z
|
automr/bridge.py
|
hebrewsnabla/pyAutoMR
|
8e81ed7fd780abd94f8b51e48ee4b980a868c204
|
[
"Apache-2.0"
] | 1 |
2022-02-21T11:35:34.000Z
|
2022-02-21T11:35:34.000Z
|
import numpy as np
import os
from automr import dump_mat
from pyscf.lib import logger  # needed by the warning branch in print_mol below
from functools import partial, reduce
print = partial(print, flush=True)
einsum = partial(np.einsum, optimize=True)
def print_mol(mol):
print(mol._basis)
print(mol.atom)
print(mol._atom)
print(mol.aoslice_by_atom())
print(mol.ao_labels())
#if mol.verbose >= logger.DEBUG:
mol.stdout.write('[INPUT] ---------------- BASIS SET ---------------- \n')
mol.stdout.write('[INPUT] l, kappa, [nprim/nctr], '
'expnt, c_1 c_2 ...\n')
for atom, basis_set in mol._basis.items():
mol.stdout.write('[INPUT] %s\n' % atom)
for b in basis_set:
if isinstance(b[1], int):
kappa = b[1]
b_coeff = b[2:]
else:
kappa = 0
b_coeff = b[1:]
nprim = len(b_coeff)
nctr = len(b_coeff[0])-1
if nprim < nctr:
logger.warn(mol, 'num. primitives smaller than num. contracted basis')
mol.stdout.write('[INPUT] %d %2d [%-5d/%-4d] '
% (b[0], kappa, nprim, nctr))
for k, x in enumerate(b_coeff):
if k == 0:
mol.stdout.write('%-15.12g ' % x[0])
else:
mol.stdout.write(' '*32+'%-15.12g ' % x[0])
for c in x[1:]:
mol.stdout.write(' %4.12g' % c)
mol.stdout.write('\n')
def py2qchem(mf, basename, is_uhf=False):
if is_uhf:
mo_coeffa = mf.mo_coeff[0]
mo_coeffb = mf.mo_coeff[1]
#mo_enea = mf.mo_energy[0]
#mo_eneb = mf.mo_energy[1]
else:
mo_coeffa = mf.mo_coeff
mo_coeffb = mf.mo_coeff
#mo_enea = mf.mo_energy
#mo_eneb = mf.mo_energy
mo_enea = np.zeros(len(mo_coeffa))
mo_eneb = np.zeros(len(mo_coeffa))
Sdiag = mf.get_ovlp().diagonal()**(0.5)
mo_coeffa = einsum('ij,i->ij', mo_coeffa, Sdiag).T
mo_coeffb = einsum('ij,i->ij', mo_coeffb, Sdiag).T
#dump_mat.dump_mo(mf.mol, mo_coeffa, ncol=10)
guess_file = np.vstack([mo_coeffa, mo_coeffb, mo_enea, mo_eneb]).flatten()
tmpbasename = '/tmp/qchem/' + basename
os.system('mkdir -p ' + tmpbasename)
    with open(tmpbasename + '/53.0', 'wb') as f:  # binary mode for tofile
guess_file.tofile(f, sep='')
create_qchem_in(mf, basename)
def create_qchem_in(mf, basename, uhf=False, sph=True):
atom = mf.mol.format_atom(mf.mol.atom, unit=1)
with open(basename + '.in', 'w') as f:
f.write('$molecule\n')
f.write(' %d %d\n' % (mf.mol.charge, mf.mol.spin+1))
for a in atom:
f.write(' %s %12.6f %12.6f %12.6f\n' % (a[0], a[1][0], a[1][1], a[1][2]))
f.write('$end\n\n')
'''f.write('$rem\n')
f.write(' method = hf\n')
if uhf:
f.write(' unrestricted = true\n')
f.write(' basis = cc-pvdz\n')
f.write(' print_orbitals = true\n')
f.write(' sym_ignore = true\n')
if sph:
f.write(' purecart = 1111\n')
else:
f.write(' purecart = 2222\n')
f.write(' scf_guess_print = 2\n')
f.write(' scf_guess = read\n')
f.write(' scf_convergence = 0\n')
f.write(' thresh = 12\n')
f.write('$end\n\n')
f.write('@@@\n\n')
f.write('$molecule\n')
f.write('read\n')
f.write('$end\n\n')'''
f.write('$rem\n')
#f.write(' method = hf\n')
f.write(' correlation = pp\n')
f.write(' gvb_local = 0\n')
f.write(' gvb_n_pairs = 2\n')
f.write(' gvb_print = 1\n')
if uhf:
f.write(' unrestricted = true\n')
f.write(' basis = cc-pvdz\n')
f.write(' print_orbitals = true\n')
f.write(' sym_ignore = true\n')
if sph:
f.write(' purecart = 1111\n')
else:
f.write(' purecart = 2222\n')
f.write(' scf_guess_print = 2\n')
f.write(' scf_guess = read\n')
f.write(' thresh = 12\n')
f.write('$end\n\n')
def qchem2py(basename):
    with open('/tmp/qchem/' + basename + '/53.0', 'rb') as f:  # binary mode for fromfile
data = np.fromfile(f)
print(data.shape)
n = data.shape[0]
#x = sympy.Symbol('x')
#nmo = sympy.solve(2*x*(x+1) -n, x)
nmo = int(np.sqrt(n/2.0+0.25)-0.5)
moa = data[:nmo*nmo].reshape(nmo,nmo).T
mob = data[nmo*nmo:2*nmo*nmo].reshape(nmo,nmo).T
mo = (moa, mob)
return mo
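# Sanity check for the nmo formula above (comment only, not in the original
# code): the 53.0 file stores two nmo x nmo MO matrices plus two length-nmo
# energy vectors, so n = 2*nmo**2 + 2*nmo, which inverts to
# nmo = sqrt(n/2 + 0.25) - 0.5. For example, nmo = 10 gives n = 220 and
# sqrt(110.25) - 0.5 = 10.0.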
| 35.015625 | 86 | 0.51071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,611 | 0.359438 |
55a1b6b516c4d12eb63cdf47d747201063521f8c
| 487 |
py
|
Python
|
Example/playstore.py
|
goodop/api-imjustgood.com
|
6406b531c4393fa8a4ace3c206d23895da915caf
|
[
"MIT"
] | 4 |
2021-01-01T10:20:13.000Z
|
2021-11-08T09:32:54.000Z
|
Example/playstore.py
|
goodop/api-imjustgood.com
|
6406b531c4393fa8a4ace3c206d23895da915caf
|
[
"MIT"
] | null | null | null |
Example/playstore.py
|
goodop/api-imjustgood.com
|
6406b531c4393fa8a4ace3c206d23895da915caf
|
[
"MIT"
] | 25 |
2021-01-09T18:22:32.000Z
|
2021-05-29T07:42:06.000Z
|
from justgood import imjustgood
media = imjustgood("YOUR_APIKEY_HERE")
query = "gojek" # example query
data = media.playstore(query)
# Get attributes
number = 0
result = "Playstore :"
for a in data["result"]:
number += 1
result += "\n\n{}. {}".format(number, a["title"])
result += "\nDeveloper : {}".format(a["developer"])
result += "\nThumbnail : {}".format(a["thumbnail"])
result += "\nURL : {}".format(a["pageUrl"])
print(result)
# Get JSON results
print(data)
| 24.35 | 55 | 0.63655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.396304 |
55a448450ef16dcbbfd95d6484daa13257f8e1ca
| 1,089 |
py
|
Python
|
disjoint_set.py
|
Mt-Kunlun/Object-Saliency-Map-Atari
|
759f7d9d2658626043f6b0e0dcaf8acd3c0e4655
|
[
"MIT"
] | null | null | null |
disjoint_set.py
|
Mt-Kunlun/Object-Saliency-Map-Atari
|
759f7d9d2658626043f6b0e0dcaf8acd3c0e4655
|
[
"MIT"
] | null | null | null |
disjoint_set.py
|
Mt-Kunlun/Object-Saliency-Map-Atari
|
759f7d9d2658626043f6b0e0dcaf8acd3c0e4655
|
[
"MIT"
] | null | null | null |
import numpy as np
# disjoint-set forests using union-by-rank and path compression (sort of).
class universe:
def __init__(self, n_elements):
self.num = n_elements
self.elts = np.empty(shape=(n_elements, 3), dtype=int)
for i in range(n_elements):
self.elts[i, 0] = 0 # rank
self.elts[i, 1] = 1 # size
self.elts[i, 2] = i # p
def size(self, x):
return self.elts[x, 1]
def num_sets(self):
return self.num
def find(self, x):
y = int(x)
while y != self.elts[y, 2]:
y = self.elts[y, 2]
self.elts[x, 2] = y
return y
def join(self, x, y):
# x = int(x)
# y = int(y)
if self.elts[x, 0] > self.elts[y, 0]:
self.elts[y, 2] = x
self.elts[x, 1] += self.elts[y, 1]
else:
self.elts[x, 2] = y
self.elts[y, 1] += self.elts[x, 1]
if self.elts[x, 0] == self.elts[y, 0]:
self.elts[y, 0] += 1
self.num -= 1
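if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): union two pairs
    # of elements, then verify all three share a representative.
    u = universe(5)
    u.join(u.find(0), u.find(1))
    u.join(u.find(1), u.find(2))
    assert u.find(0) == u.find(2)
    print("sets remaining:", u.num_sets())  # expected: 3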
| 27.923077 | 75 | 0.459137 | 987 | 0.906336 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.109275 |
55a528f7f755e76f01a1fec6c18655befd899209
| 131 |
py
|
Python
|
Logon.py
|
fenglihanxiao/multi_test
|
46ee84aaa36f1d9594ccf7a14caa167dfcd719d5
|
[
"MIT"
] | null | null | null |
Logon.py
|
fenglihanxiao/multi_test
|
46ee84aaa36f1d9594ccf7a14caa167dfcd719d5
|
[
"MIT"
] | null | null | null |
Logon.py
|
fenglihanxiao/multi_test
|
46ee84aaa36f1d9594ccf7a14caa167dfcd719d5
|
[
"MIT"
] | null | null | null |
num1 = 1
num2 = 20
num3 = 168
# dev first commit
num1 = 1
# resolve conflict
num2 = 88888888
# Test next commit
num3 = 99
| 8.1875 | 18 | 0.641221 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.412214 |
55a5624a3d2ac28eb83b211136e77b9c0d5431d3
| 1,441 |
py
|
Python
|
latteys/latteys/doctype/auto_mail.py
|
hrgadesha/lattyeys
|
428b752ac99620ac7ad706fd305f07210bdcb315
|
[
"MIT"
] | 1 |
2021-09-10T03:51:22.000Z
|
2021-09-10T03:51:22.000Z
|
latteys/latteys/doctype/auto_mail.py
|
hrgadesha/lattyeys
|
428b752ac99620ac7ad706fd305f07210bdcb315
|
[
"MIT"
] | null | null | null |
latteys/latteys/doctype/auto_mail.py
|
hrgadesha/lattyeys
|
428b752ac99620ac7ad706fd305f07210bdcb315
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import frappe
from datetime import datetime
from frappe.model.document import Document
@frappe.whitelist(allow_guest=True)
def sendMail(doc,method):
if doc.send_email_on_event_creation:
for d in doc.event_participants:
if d.reference_doctype == "Employee":
email = frappe.db.sql("""select prefered_email,employee_name from `tabEmployee` where name = %s;""",(d.reference_docname))
if email:
content = "<h4>Dear,</h4><p>"+email[0][1]+"</p><br><br><p>Event : "+str(doc.subject)+"</p><p>Start : "+str(doc.starts_on)+"</p><p>End : "+str(doc.ends_on)+"</p><p>Event Category : "+str(doc.event_category)+"</p><br><p><b>Description : </b><br>"+str(doc.description)+"</p>"
frappe.sendmail(recipients=email[0][0],sender="[email protected]",subject="Invitation For Event", content=content)
if d.reference_doctype == "Lead":
email = frappe.db.sql("""select email_id,lead_name from `tabLead` where name = %s;""",(d.reference_docname))
if email:
content = "<h4>Dear,</h4><p>"+email[0][1]+"</p><br><br><p>Event : "+str(doc.subject)+"</p><p>Start : "+str(doc.starts_on)+"</p><p>End : "+str(doc.ends_on)+"</p><p>Event Category : "+str(doc.event_category)+"</p><br><p><b>Description : </b><br>"+str(doc.description)+"</p>"
frappe.sendmail(recipients=email[0][0],sender="[email protected]",subject="Invitation For Event", content=content)
| 65.5 | 277 | 0.687717 | 0 | 0 | 0 | 0 | 1,312 | 0.910479 | 0 | 0 | 564 | 0.391395 |
55a57c64b93ff64ee4143c416e8510e88ce162fa
| 8,022 |
py
|
Python
|
foulacces.py
|
Danukeru/FOULACCES
|
54304c7a91326f9517c45f6981c4ab8de4eb3964
|
[
"BSD-3-Clause"
] | 1 |
2019-10-21T23:43:21.000Z
|
2019-10-21T23:43:21.000Z
|
foulacces.py
|
Danukeru/FOULACCES
|
54304c7a91326f9517c45f6981c4ab8de4eb3964
|
[
"BSD-3-Clause"
] | null | null | null |
foulacces.py
|
Danukeru/FOULACCES
|
54304c7a91326f9517c45f6981c4ab8de4eb3964
|
[
"BSD-3-Clause"
] | 1 |
2019-10-21T23:43:29.000Z
|
2019-10-21T23:43:29.000Z
|
#!/usr/bin/env python
import os
import sys
import hashlib
import httplib
import base64
import socket
from xml.dom.minidom import *
RAC_CODE = { 'x' : 'Unknown error',
'0x0' : 'Success',
'0x4' : 'Number of arguments does not match',
'0xc' : 'Syntax error in xml2cli command',
'0x408' : 'Session Timeout',
'0x43' : 'No such subfunction',
'0x62' : 'Command not supported on this platform for this firmware',
'0xb0002' : 'Invalid handle',
'0x140000' : 'Too many sessions',
'0x140002' : 'Logout',
'0x140004' : 'Invalid password',
'0x140005' : 'Invalid username',
'0x150008' : 'Too many requests',
'0x15000a' : 'No such event',
'0x15000c' : 'No such function',
'0x15000d' : 'Unimplemented',
'0x170003' : 'Missing content in POST ?',
'0x170007' : 'Dont know yet',
'0x1a0004' : 'Invalid sensorname',
'0x10150006' : 'Unknown sensor error',
'0x10150009' : 'Too many sensors in sensorlist',
'0x20308' : 'Console not available',
'0x30003' : 'Console not active',
'0x3000a' : 'Console is in text mode',
'0x3000b' : 'Console is in VGA graphic mode',
'0x30011' : [ 'Console is in Linux mode (no ctrl+alt+del)',
'Console is in Windows or Netware mode' ],
'0xe0003' : 'Unknown serveraction',
'0xf0001' : 'Offset exceeds number of entries in eventlog',
'0xf0003' : 'Request exceeds number of entries in eventlog',
'0xf0004' : 'Invalid number of events requested'
}
SEVERITY = { 'x' : 'Unknown severity. ',
'' : '-',
'0x1' : 'Unknown',
'0x2' : 'OK',
'0x3' : 'Information',
'0x4' : 'Recoverable',
'0x5' : 'Non-Critical',
'0x6' : 'Critical',
'0x7' : 'Non-Recoverable',
}
BOGUS_IDS_1650 = [ '0x1010018', '0x1020010', '0x1020018',
'0x1020062', '0x1030010', '0x1030018',
'0x1030062', '0x1040010', '0x1040018',
'0x1050018', '0x1060010', '0x1060018',
'0x1060062', '0x1070018', '0x1070062',
'0x1080010', '0x1080062', '0x1090010',
'0x10a0010', '0x10f0062', '0x1100010',
'0x1110010', '0x1120010', '0x1120062',
'0x1130010', '0x1140010', '0x1150010',
'0x13b0010', '0x13c0010', '0x13f0010',
'0x14b0010', '0x14d0010', '0x20e0062',
'0x2110062', '0x2160061', '0x2160062',
'0x2170061', '0x2170062', '0x2180061',
'0x2180062', '0x2190061', '0x2190062',
'0x21a0061', '0x21a0062', '0x21b0061',
'0x21b0062', '0x21e0010', '0x21e0061',
'0x21e0062', '0x21f0061', '0x21f0062',
'0x2210010', '0x2220010', '0x2230010',
'0x2240010', '0x2250010', '0x2260010',
'0x2270010', '0x2280010', '0x2290010',
'0x22a0010', '0x22b0010', '0x22c0010',
'0x22d0010', '0x22e0010', '0x22f0010',
'0x2300010', '0x2310010', '0x2320010',
'0x2330010', '0x2340010', '0x2350010',
'0x2360010', '0x2370010', '0x2380010',
'0x2390010', '0x23a0010', '0x23e0010',
'0x2410010', '0x2420010', '0x2430010',
'0x2440010', '0x2450010', '0x2460010',
'0x2470010', '0x2480010', '0x2530010',
]
BOGUS_IDS_2650 = [ '0x1350010', '0x1360010', '0x2160061',
'0x2170061', '0x2180061', '0x2190061',
'0x21a0061', '0x21b0061', '0x21c0061',
'0x21d0061', '0x21e0060', '0x21e0061',
'0x21f0060', '0x21f0061', '0x2d00010',
]
BOGUS_IDS_1750 = [ '0x1060062', '0x1070062', '0x1080062',
'0x10f0062', '0x1120062', '0x1030062',
'0x1020062', '0x20e0062', '0x2110062',
'0x2160062', '0x2170062', '0x2180062',
'0x2190062', '0x21a0062', '0x21b0062',
'0x21f0062', '0x21e0062', '0x2160061',
'0x2170061', '0x2180061', '0x2190061',
'0x21a0061', '0x21b0061', '0x21f0061',
'0x21e0061', '0x1010010', '0x1020010',
'0x1030010', '0x1040010', '0x1080010',
'0x1090010', '0x10a0010', '0x1100010',
'0x1110010', '0x1120010', '0x1130010',
'0x1140010', '0x1150010', '0x21e0010',
'0x2210010', '0x2220010', '0x2230010',
'0x2240010', '0x2250010', '0x2260010',
'0x2290010', '0x22a0010', '0x22b0010',
'0x22c0010', '0x22d0010', '0x22e0010',
'0x22f0010', '0x2300010', '0x2310010',
'0x2320010', '0x2330010', '0x2340010',
'0x2350010', '0x2360010', '0x2370010',
'0x2380010', '0x2390010', '0x23a0010',
'0x13b0010', '0x13c0010', '0x13f0010',
'0x2440010', '0x2450010', '0x2460010',
'0x2470010', '0x2480010', '0x14a0010',
'0x14d0010', '0x14e0010', '0x1500010',
'0x1510010', '0x2000010', '0x2570010',
'0x10f0060', '0x1120060', '0x1020060',
'0x1010018', '0x1020018', '0x1030018',
'0x1040018', '0x1050018', '0x1060018',
'0x1070018',
]
PROPNAMES = [ 'NAME',
'SEVERITY',
'LOW_CRITICAL',
'LOW_NON_CRITICAL',
'VAL',
'UNITS',
'UPPER_NON_CRITICAL',
'UPPER_CRITICAL',
'SENSOR_TYPE',
]
DRIVE_SLOT_CODES = { '0' : 'Good',
'1' : 'No Error',
'2' : 'Faulty Drive',
'4' : 'Drive Rebuilding',
'8' : 'Drive In Failed Array',
'16' : 'Drive In Critical Array',
'32' : 'Parity Check Error',
'64' : 'Predicted Error',
'128' : 'No Drive',
}
POWER_UNIT_CODES = { '0' : 'AC Power Unit',
'1' : 'DC Power Unit',
}
BUTTON_CODES = { '0' : 'Power Button Disabled',
'1' : 'Power Button Enabled'
}
FAN_CONTROL_CODES = { '0' : 'Normal Operation',
'1' : 'Unknown',
}
INTRUSION_CODES = { '0' : 'No Intrusion',
'1' : 'Cover Intrusion Detected',
'2' : 'Bezel Intrusion Detected',
}
POWER_SUPPLY_CODES = { '1' : 'Good',
'2' : 'Failure Detected',
'4' : 'Failure Predicted',
'8' : 'Power Lost',
'16' : 'Not Present',
}
PROCESSOR_CODES = { '1' : 'Good',
'2' : 'Failure Detected',
'4' : 'Failure Predicted',
'8' : 'Power Lost',
'16' : 'Not Present',
}
CODES = { 'button' : BUTTON_CODES,
'drive slot' : DRIVE_SLOT_CODES,
'fan control' : FAN_CONTROL_CODES,
          'intrusion' : INTRUSION_CODES,
'power supply' : POWER_SUPPLY_CODES,
'power unit' : POWER_UNIT_CODES,
'processor' : PROCESSOR_CODES,
}
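# Minimal lookup sketch (not part of the original script): translate a raw
# sensor reading into a human-readable status via the nested CODES tables.
# The sensor-type strings are the keys defined in CODES above.
def decode_sensor(sensor_type, raw_value):
    table = CODES.get(sensor_type.lower(), {})
    return table.get(str(raw_value), 'Unknown code %s' % raw_value)
# e.g. decode_sensor('intrusion', 1) returns 'Cover Intrusion Detected'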
| 40.11 | 87 | 0.446771 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,028 | 0.502119 |
55a63e41c61dfc7f2803753c38bd275ef075fcb4
| 10,272 |
py
|
Python
|
codes/3_derive_elementary_effects.py
|
aviolinist/EEE
|
032e2029815229875048cc92dd7da24ff3f71e93
|
[
"MIT"
] | 6 |
2019-09-27T15:38:37.000Z
|
2021-02-03T13:58:01.000Z
|
codes/3_derive_elementary_effects.py
|
aviolinist/EEE
|
032e2029815229875048cc92dd7da24ff3f71e93
|
[
"MIT"
] | null | null | null |
codes/3_derive_elementary_effects.py
|
aviolinist/EEE
|
032e2029815229875048cc92dd7da24ff3f71e93
|
[
"MIT"
] | 5 |
2019-09-27T15:38:52.000Z
|
2022-03-22T17:24:37.000Z
|
#!/usr/bin/env python
from __future__ import print_function
# Copyright 2019 Juliane Mai - juliane.mai(at)uwaterloo.ca
#
# License
# This file is part of the EEE code library for "Computationally inexpensive identification
# of noninformative model parameters by sequential screening: Efficient Elementary Effects (EEE)".
#
# The EEE code library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The MVA code library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with The EEE code library.
# If not, see <https://github.com/julemai/EEE/blob/master/LICENSE>.
#
# If you use this method in a publication please cite:
#
# M Cuntz & J Mai et al. (2015).
# Computationally inexpensive identification of noninformative model parameters by sequential screening.
# Water Resources Research, 51, 6417-6441.
# https://doi.org/10.1002/2015WR016907.
#
#
#
# python 3_derive_elementary_effects.py \
# -i example_ishigami-homma/model_output.pkl \
# -d example_ishigami-homma/parameters.dat \
# -m example_ishigami-homma/parameter_sets_1_para3_M.dat \
# -v example_ishigami-homma/parameter_sets_1_para3_v.dat \
# -o example_ishigami-homma/eee_results.dat
"""
Derives the Elementary Effects based on model outputs stored as dictionary in a pickle file (option -i)
using specified model parameters (option -d). The model parameters were sampled beforehand as Morris
trajectories. The Morris trajectory information is stored in two files (option -m and option -v). The
Elementary Effects are stored in a file (option -o).
History
-------
Written, JM, Mar 2019
"""
# -------------------------------------------------------------------------
# Command line arguments
#
modeloutputs = 'example_ishigami-homma/model_output.pkl'
modeloutputkey = 'All'
maskfile = 'example_ishigami-homma/parameters.dat'
morris_M = 'example_ishigami-homma/parameter_sets_1_para3_M.dat'
morris_v = 'example_ishigami-homma/parameter_sets_1_para3_v.dat'
outfile = 'example_ishigami-homma/eee_results.dat'
skip = None # number of lines to skip in Morris files
import optparse
parser = optparse.OptionParser(usage='%prog [options]',
description="Derives the Elementary Effects based on model outputs stored as dictionary in a pickle file (option -i) using specified model parameters (option -d). The model parameters were sampled beforehand as Morris trajectories. The Morris trajectory information is stored in two files (option -m and option -v). The Elementary Effects are stored in a file (option -o).")
parser.add_option('-i', '--modeloutputs', action='store',
default=modeloutputs, dest='modeloutputs', metavar='modeloutputs',
help="Name of file used to save (scalar) model outputs in a pickle file (default: 'model_output.pkl').")
parser.add_option('-k', '--modeloutputkey', action='store',
default=modeloutputkey, dest='modeloutputkey', metavar='modeloutputkey',
help="Key of model output dictionary stored in pickle output file. If 'All', all model outputs are taken into account and multi-objective EEE is applied. (default: 'All').")
parser.add_option('-d', '--maskfile', action='store', dest='maskfile', type='string',
default=maskfile, metavar='File',
help='Name of file where all model parameters are specified including their distribution, distribution parameters, default value and if included in analysis or not. (default: maskfile=parameters.dat).')
parser.add_option('-m', '--morris_M', action='store', dest='morris_M', type='string',
default=morris_M, metavar='morris_M',
help="Morris trajectory information: The UNSCALED parameter sets. (default: 'parameter_sets_1_para3_M.dat').")
parser.add_option('-v', '--morris_v', action='store', dest='morris_v', type='string',
default=morris_v, metavar='morris_v',
help="Morris trajectory information: The indicator which parameter changed between subsequent sets in a trajectory. (default: 'parameter_sets_1_para3_v.dat').")
parser.add_option('-s', '--skip', action='store',
default=skip, dest='skip', metavar='skip',
help="Number of lines to skip in Morris output files (default: None).")
parser.add_option('-o', '--outfile', action='store', dest='outfile', type='string',
default=outfile, metavar='File',
help='File containing Elementary Effect estimates of all model parameters listed in parameter information file. (default: eee_results.dat).')
(opts, args) = parser.parse_args()
modeloutputs = opts.modeloutputs
modeloutputkey = opts.modeloutputkey
maskfile = opts.maskfile
morris_M = opts.morris_M
morris_v = opts.morris_v
outfile = opts.outfile
skip = opts.skip
del parser, opts, args
# -----------------------
# add subolder scripts/lib to search path
# -----------------------
import sys
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path+'/lib')
import numpy as np
import pickle
from fsread import fsread # in lib/
from autostring import astr # in lib/
# -------------------------
# read parameter info file
# -------------------------
# parameter info file has following header:
# # para dist lower upper default informative(0)_or_noninformative(1)
# # mean stddev
nc,snc = fsread(maskfile, comment="#",cskip=1,snc=[0,1],nc=[2,3,4,5])
snc = np.array(snc)
para_name = snc[:,0]
para_dist = snc[:,1]
lower_bound = nc[:,0]
upper_bound = nc[:,1]
initial = nc[:,2]
# if informative(0) -> maskpara=False
# if noninformative(1) -> maskpara=True
mask_para = np.where((nc[:,3].flatten())==1.,True,False)
dims_all = np.shape(mask_para)[0]
idx_para = np.arange(dims_all)[mask_para] # indexes of parameters which will be changed [0,npara-1]
dims = np.sum(mask_para)
# pick only non-masked bounds
lower_bound_mask = lower_bound[np.where(mask_para)]
upper_bound_mask = upper_bound[np.where(mask_para)]
para_dist_mask = para_dist[np.where(mask_para)]
para_name_mask = para_name[np.where(mask_para)]
# -------------------------
# read model outputs
# -------------------------
model_output = pickle.load( open( modeloutputs, "rb" ) )
if modeloutputkey == 'All':
keys = list(model_output.keys())
else:
keys = [ modeloutputkey ]
model_output = [ np.array(model_output[ikey]) for ikey in keys ]
nkeys = len(model_output)
# -------------------------
# read Morris M
# -------------------------
ff = open(morris_M, "r")
parasets = ff.readlines()
ff.close()
if skip is None:
    skip = int(parasets[0].strip().split(':')[1])
else:
    skip = int(skip)
parasets = parasets[skip:]
for iparaset,paraset in enumerate(parasets):
parasets[iparaset] = list(map(float,paraset.strip().split()))
parasets = np.array(parasets)
# -------------------------
# read Morris v
# -------------------------
ff = open(morris_v, "r")
parachanged = ff.readlines()
ff.close()
if skip is None:
    skip = int(parachanged[0].strip().split(':')[1])
else:
    skip = int(skip)
parachanged = parachanged[skip:]
for iparachanged,parachan in enumerate(parachanged):
    parachanged[iparachanged] = int(parachan.strip())
parachanged = np.array(parachanged)
# -------------------------
# calculate Elementary Effects
# -------------------------
ee = np.zeros([dims_all,nkeys],dtype=float)
ee_counter = np.zeros([dims_all,nkeys],dtype=int)
ntraj = int(np.shape(parasets)[0] / (dims+1))
nsets = np.shape(parasets)[0]
for ikey in range(nkeys):
for iset in range(nsets):
ipara_changed = parachanged[iset]
if ipara_changed != -1:
ee_counter[ipara_changed,ikey] += 1
if ( len(np.shape(model_output[ikey])) == 1):
# scalar model output
ee[ipara_changed,ikey] += np.abs(model_output[ikey][iset]-model_output[ikey][iset+1]) / np.abs(parasets[iset,ipara_changed] - parasets[iset+1,ipara_changed])
elif ( len(np.shape(model_output[ikey])) == 2):
# 1D model output
ee[ipara_changed,ikey] += np.mean(np.abs(model_output[ikey][iset,:]-model_output[ikey][iset+1,:]) / np.abs(parasets[iset,ipara_changed] - parasets[iset+1,ipara_changed]))
else:
raise ValueError('Only scalar and 1D model outputs are supported!')
for ikey in range(nkeys):
for ipara in range(dims_all):
if ee_counter[ipara,ikey] > 0:
ee[ipara,ikey] /= ee_counter[ipara,ikey]
# -------------------------
# write final file
# -------------------------
# format:
# # model output #1: 'out1'
# # model output #2: 'out2'
# # ii para_name elemeffect(ii),ii=1:3,jj=1:1 counter(ii),ii=1:3,jj=1:1
# 1 'x_1' 0.53458196335158181 5
# 2 'x_2' 7.0822368906630215 5
# 3 'x_3' 3.5460086652980554 5
f = open(outfile, 'w')
for ikey in range(nkeys):
f.write('# model output #'+str(ikey+1)+': '+keys[ikey]+'\n')
f.write('# ii para_name elemeffect(ii),ii=1:'+str(dims_all)+',jj=1:'+str(nkeys)+' counter(ii),ii=1:'+str(dims_all)+',jj=1:'+str(nkeys)+' \n')
for ipara in range(dims_all):
f.write(str(ipara)+' '+para_name[ipara]+' '+' '.join(astr(ee[ipara,:],prec=8))+' '+' '.join(astr(ee_counter[ipara,:]))+'\n')
f.close()
print("wrote: '"+outfile+"'")
| 43.897436 | 405 | 0.633178 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,612 | 0.54634 |
55a64a7a3b06450aa004faf6e58c77885b9ba532
| 1,377 |
py
|
Python
|
leetcode/medium/113-Path_sum_II.py
|
shubhamoli/practice
|
5a24fdeb6e5f43b821ef0510fe3b343ddda18f22
|
[
"MIT"
] | 1 |
2020-02-25T10:32:27.000Z
|
2020-02-25T10:32:27.000Z
|
leetcode/medium/113-Path_sum_II.py
|
shubhamoli/practice
|
5a24fdeb6e5f43b821ef0510fe3b343ddda18f22
|
[
"MIT"
] | null | null | null |
leetcode/medium/113-Path_sum_II.py
|
shubhamoli/practice
|
5a24fdeb6e5f43b821ef0510fe3b343ddda18f22
|
[
"MIT"
] | null | null | null |
"""
Leetcode #113
"""
from typing import List
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:
if not root:
            return []
res = []
def helper(node, curr, target):
curr.append(node.val)
target = target - node.val
            if not node.left and not node.right and target == 0:
res.append(curr[:])
if node.left:
helper(node.left, curr, target)
if node.right:
helper(node.right, curr, target)
tmp = curr.pop()
target += tmp
helper(root, [], sum)
return res
if __name__ == "__main__":
root = TreeNode(5)
root.left = TreeNode(4)
root.left.left = TreeNode(11)
root.left.left.left = TreeNode(7)
root.left.left.right = TreeNode(2)
root.right = TreeNode(8)
root.right.left = TreeNode(13)
root.right.right = TreeNode(4)
root.right.right.left = TreeNode(5)
root.right.right.right = TreeNode(1)
"""
Expected Tree
5
/ \
4 8
/ / \
11 13 4
/ \ / \
7 2 5 1
"""
print(Solution().pathSum(root, 22))
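    # expected output for the tree above with sum 22:
    # [[5, 4, 11, 2], [5, 8, 4, 5]]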
| 19.394366 | 67 | 0.511256 | 764 | 0.554829 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.122731 |
55a6a32920fa2fc82181f6e01d6935314fa6f974
| 137 |
py
|
Python
|
transiter_ny_mta/transiter_ny_mta/__init__.py
|
Pizza-Ratz/transiter-ny
|
40091d3ff0c1b9e046b0d3ca708acb81df5019c6
|
[
"MIT"
] | 1 |
2021-01-25T16:02:14.000Z
|
2021-01-25T16:02:14.000Z
|
transiter_ny_mta/transiter_ny_mta/__init__.py
|
Pizza-Ratz/transiter-ny
|
40091d3ff0c1b9e046b0d3ca708acb81df5019c6
|
[
"MIT"
] | null | null | null |
transiter_ny_mta/transiter_ny_mta/__init__.py
|
Pizza-Ratz/transiter-ny
|
40091d3ff0c1b9e046b0d3ca708acb81df5019c6
|
[
"MIT"
] | 1 |
2021-07-02T14:34:04.000Z
|
2021-07-02T14:34:04.000Z
|
from .alertsparser import AlertsParser
from .subwaytripsparser import SubwayTripsParser
from .stationscsvparser import StationsCsvParser
| 34.25 | 48 | 0.890511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
55a76346989d9cefd61701c39bcea10af1d5f5b9
| 4,254 |
py
|
Python
|
main.py
|
MrValdez/ggj-2018
|
d8806a47f561f54afd915d7b5e03181fbd2dbcfa
|
[
"MIT"
] | null | null | null |
main.py
|
MrValdez/ggj-2018
|
d8806a47f561f54afd915d7b5e03181fbd2dbcfa
|
[
"MIT"
] | null | null | null |
main.py
|
MrValdez/ggj-2018
|
d8806a47f561f54afd915d7b5e03181fbd2dbcfa
|
[
"MIT"
] | 1 |
2018-02-25T15:04:43.000Z
|
2018-02-25T15:04:43.000Z
|
import os
import pygame
from input import Input
from stages.stage import Stage
from stages.stage_example import StageExample
from stages.stage1 import Stage1
from stages.stage2 import Stage2
from stages.stage3 import Stage3
from stages.stage4 import Stage4
from stages.stage5 import Stage5
from stages.stage6 import Stage6
from stages.stage7 import Stage7
from stages.stage8 import Stage8
from stages.stage9 import Stage9
from stages.stage10 import Stage10
from stages.stage11 import Stage11
from stages.stage12 import Stage12
from stages.stage13 import Stage13
from stages.stage14 import Stage14
from stages.stage15 import Stage15
from stages.stage16 import Stage16
from stages.stage17 import Stage17
from stages.stage18 import Stage18
from stages.stage19 import Stage19
from stages.stage20 import Stage20
from stages.stage21 import Stage21
from stages.stage22 import Stage22
from stages.stage23 import Stage23
from stages.stage24 import Stage24
from stages.stage25 import Stage25
from stages.stage26 import Stage26
from stages.stage27 import Stage27
from stages.stage28 import Stage28
from stages.stage29 import Stage29
from stages.stage30 import Stage30
from stages.stage31 import Stage31
from stages.stage32 import Stage32
from stages.stage_start import Stage_start
from stages.stage_end import Stage_end
from stages.stage_transition import Stage_transition
#os.environ['SDL_VIDEO_WINDOW_POS'] = "1, 0"
os.environ['SDL_VIDEO_WINDOW_POS'] = "100, 10"
resolution = [800, 600]
pygame.init()
pygame.mouse.set_visible(False)
pygame.display.set_caption("32 bits of delivery")
screen = pygame.display.set_mode(resolution)
clock = pygame.time.Clock()
GameIsRunning = True
input = Input()
stages = [
# StageExample(resolution),
# Stage1(resolution),
Stage_start(resolution),
Stage2(resolution), # have you tried turning it on and off again?
Stage29(resolution), # Button mash to transmit
Stage27(resolution), # Stop Spamming
Stage26(resolution), # Share love by petting
Stage8(resolution), # Two auth factor
Stage7(resolution), # USB connection
Stage16(resolution), # Poop
Stage18(resolution), # Upgrade PC
Stage9(resolution), # Dancing
Stage22(resolution), # Psychic transmission
Stage21(resolution), # Fix TV
Stage20(resolution), # Tune TV signal
Stage17(resolution), # Buy coffee
Stage25(resolution), # Share regrets
Stage23(resolution), # Send SMS
Stage13(resolution), # Love transmission!
Stage3(resolution), # chrome game
Stage15(resolution), # Clap to transmit noise
Stage19(resolution), # Sell trash
Stage14(resolution), # Find the strongest transmission
Stage28(resolution), # Game and Watch
Stage24(resolution), # Send Like
Stage6(resolution), # energize with coffee
Stage5(resolution), # crowd surfing game
Stage32(resolution), # transmit knowledge
Stage30(resolution), # transmit toothpaste
Stage31(resolution), # transmit toothpaste to teeth
Stage12(resolution), # Charge!
Stage11(resolution), # Space Defender
Stage4(resolution), # punching game
Stage10(resolution), # Ninja Turtle Van
Stage_end(resolution),
]
# add transtitions
updated_stages = []
for stage in stages:
updated_stages.append(stage)
updated_stages.append(Stage_transition(resolution))
stages = updated_stages
currentStage = 0
#currentStage = -2
while GameIsRunning:
pygame.display.flip()
tick = clock.tick(60)
screen.fill([0, 0, 0])
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                GameIsRunning = False
        if event.type == pygame.QUIT:
            GameIsRunning = False

    # quit only after the event loop, before touching pygame again this frame
    if not GameIsRunning:
        pygame.quit()
        break
input.update()
complete = stages[currentStage].update(input, tick)
if complete:
currentStage = (currentStage + 1) % len(stages)
stages[currentStage].__init__(resolution)
stages[currentStage].draw(screen)
| 32.723077 | 77 | 0.704278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 737 | 0.173249 |
55a8a143755092a98ad8640901e8dbdb8d58845f
| 9,439 |
py
|
Python
|
install/app_store/tk-framework-desktopserver/v1.3.1/python/tk_framework_desktopserver/command.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | null | null | null |
install/app_store/tk-framework-desktopserver/v1.3.1/python/tk_framework_desktopserver/command.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | null | null | null |
install/app_store/tk-framework-desktopserver/v1.3.1/python/tk_framework_desktopserver/command.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | 1 |
2020-02-15T10:42:56.000Z
|
2020-02-15T10:42:56.000Z
|
# Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import os
import subprocess
from threading import Thread
from Queue import Queue
import tempfile
import sys
import traceback
from .logger import get_logger
logger = get_logger(__name__)
class ReadThread(Thread):
"""
Thread that reads a pipe.
"""
def __init__(self, p_out, target_queue):
"""
Constructor.
:param p_out: Pipe to read.
:param target_queue: Queue that will accumulate the pipe output.
"""
Thread.__init__(self)
self.pipe = p_out
self.target_queue = target_queue
def run(self):
"""
Reads the contents of the pipe and adds it to the queue until the pipe
is closed.
"""
while True:
line = self.pipe.readline() # blocking read
if line == '':
break
self.target_queue.put(line)
class Command(object):
@staticmethod
def _create_temp_file():
"""
:returns: Returns the path to a temporary file.
"""
handle, path = tempfile.mkstemp(prefix="desktop_server")
os.close(handle)
return path
@staticmethod
def call_cmd(args):
"""
Runs a command in a separate process.
:param args: Command line tokens.
:returns: A tuple containing (exit code, stdout, stderr).
"""
# The commands that are being run are probably being launched from Desktop, which would
# have a TANK_CURRENT_PC environment variable set to the site configuration. Since we
# preserve that value for subprocesses (which is usually the behavior we want), the DCCs
# being launched would try to run in the project environment and would get an error due
# to the conflict.
#
# Clean up the environment to prevent that from happening.
env = os.environ.copy()
vars_to_remove = ["TANK_CURRENT_PC"]
for var in vars_to_remove:
if var in env:
del env[var]
# Launch the child process
# Due to discrepencies on how child file descriptors and shell=True are
# handled on Windows and Unix, we'll provide two implementations. See the Windows
# implementation for more details.
if sys.platform == "win32":
ret, stdout_lines, stderr_lines = Command._call_cmd_win32(args, env)
else:
ret, stdout_lines, stderr_lines = Command._call_cmd_unix(args, env)
out = ''.join(stdout_lines)
err = ''.join(stderr_lines)
return ret, out, err
@staticmethod
def _call_cmd_unix(args, env):
"""
Runs a command in a separate process. Implementation for Unix based OSes.
:param args: Command line tokens.
:param env: Environment variables to set for the subprocess.
:returns: A tuple containing (exit code, stdout, stderr).
"""
# Note: Tie stdin to a PIPE as well to avoid this python bug on windows
# http://bugs.python.org/issue3905
# Queue code taken from: http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
stdout_lines = []
stderr_lines = []
try:
process = subprocess.Popen(
args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env
)
process.stdin.close()
stdout_q = Queue()
stderr_q = Queue()
stdout_t = ReadThread(process.stdout, stdout_q)
stdout_t.setDaemon(True)
stdout_t.start()
stderr_t = ReadThread(process.stderr, stderr_q)
stderr_t.setDaemon(True)
stderr_t.start()
# Popen.communicate() doesn't play nicely if the stdin pipe is closed
# as it tries to flush it causing an 'I/O error on closed file' error
# when run from a terminal
#
# to avoid this, lets just poll the output from the process until
# it's finished
process.wait()
try:
process.stdout.flush()
process.stderr.flush()
except IOError:
# This fails on OSX 10.7, but it looks like there's no ill side effect
# from failing on that platform so we can ignore it.
logger.exception("Error while flushing file descriptor:")
stdout_t.join()
stderr_t.join()
while not stdout_q.empty():
stdout_lines.append(stdout_q.get())
while not stderr_q.empty():
stderr_lines.append(stderr_q.get())
ret = process.returncode
except StandardError:
# Do not log the command line, it might contain sensitive information!
logger.exception("Error running subprocess:")
ret = 1
stderr_lines = traceback.format_exc().split()
stderr_lines.append("%s" % args)
return ret, stdout_lines, stderr_lines
@staticmethod
def _call_cmd_win32(args, env):
"""
Runs a command in a separate process. Implementation for Windows.
:param args: Command line tokens.
:param env: Environment variables to set for the subprocess.
:returns: A tuple containing (exit code, stdout, stderr).
"""
stdout_lines = []
stderr_lines = []
try:
stdout_path = Command._create_temp_file()
stderr_path = Command._create_temp_file()
# On Windows, file descriptors like sockets can be inherited by child
# process and are only closed when the main process and all child
# processes are closed. This is bad because it means that the port
# the websocket server uses will never be released as long as any DCCs
# or tank commands are running. Therefore, closing the Desktop and
# restarting it for example wouldn't free the port and would give the
# "port 9000 already in use" error we've seen before.
# To avoid this, close_fds needs to be specified when launching a child
# process. However, there's a catch. On Windows, specifying close_fds
# also means that you can't share stdout, stdin and stderr with the child
# process, which is required here because we want to capture the output
# of the process.
# Therefore on Windows we'll invoke the code in a shell environment. The
# output will be redirected to two temporary files which will be read
# when the child process is over.
# Ideally, we'd be using this implementation on Unix as well. After all,
# the syntax of the command line is the same. However, specifying shell=True
# on Unix means that the following ["ls", "-al"] would be invoked like this:
# ["/bin/sh", "-c", "ls", "-al"]. This means that only ls is sent to the
# shell and -al is considered to be an argument of the shell and not part
# of what needs to be launched. The naive solution would be to quote the
# argument list and pass ["\"ls -al \""] to Popen, but that would ignore
# the fact that there could already be quotes on that command line and
# they would need to be escaped as well. Python 2's only utility to
# escape strings for the command line is pipes.quote, which is deprecated.
# Because of these reasons, we'll keep both implementations for now.
args = args + ["1>", stdout_path, "2>", stderr_path]
# Prevents the cmd.exe dialog from appearing on Windows.
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
process = subprocess.Popen(
args,
close_fds=True,
startupinfo=startupinfo,
env=env,
shell=True
)
process.wait()
# Read back the output from the two.
with open(stdout_path) as stdout_file:
stdout_lines = [l for l in stdout_file]
with open(stderr_path) as stderr_file:
stderr_lines = [l for l in stderr_file]
# Track the result code.
ret = process.returncode
except StandardError:
logger.exception("Error running subprocess:")
ret = 1
            stderr_lines = traceback.format_exc().split()
stderr_lines.append("%s" % args)
# Don't lose any sleep over temporary files that can't be deleted.
try:
os.remove(stdout_path)
except:
pass
try:
os.remove(stderr_path)
except:
pass
return ret, stdout_lines, stderr_lines
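if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): run a harmless
    # command and inspect the captured result. The command line is an example.
    ret, out, err = Command.call_cmd(["python", "--version"])
    print("exit code: %s" % ret)
    print("stdout: %s" % out)
    print("stderr: %s" % err)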
| 36.727626 | 123 | 0.604831 | 8,799 | 0.932196 | 0 | 0 | 8,070 | 0.854963 | 0 | 0 | 4,893 | 0.518381 |
55acdcacf4ba82a80f3cb7a16e721e05d9bb07b7
| 127 |
py
|
Python
|
knock-knock4/knockpy/__init__.py
|
abhinashjain/proxyfuzzer
|
9c372390afe4cd3d277bcaaeb289e4c8ef398e5e
|
[
"BSD-3-Clause"
] | 1 |
2017-03-14T21:16:43.000Z
|
2017-03-14T21:16:43.000Z
|
knock-knock4/knockpy/__init__.py
|
abhinashjain/proxyfuzzer
|
9c372390afe4cd3d277bcaaeb289e4c8ef398e5e
|
[
"BSD-3-Clause"
] | 1 |
2016-12-19T16:35:53.000Z
|
2016-12-22T19:40:30.000Z
|
knock-knock4/knockpy/__init__.py
|
abhinashjain/proxyfuzzer
|
9c372390afe4cd3d277bcaaeb289e4c8ef398e5e
|
[
"BSD-3-Clause"
] | 2 |
2018-06-15T02:00:49.000Z
|
2021-09-08T19:15:35.000Z
|
import os
_ROOT = os.path.abspath(os.path.dirname(__file__))
def get_data(path):
return os.path.join(_ROOT, 'wordlist', path)
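# Usage sketch (not in the original): get_data('wordlist.txt') returns
# os.path.join(_ROOT, 'wordlist', 'wordlist.txt'), i.e. the bundled wordlist.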
| 25.4 | 50 | 0.748031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.07874 |
55ae9ba4b65519bc33be7de8562a205f27c9a655
| 745 |
py
|
Python
|
brilws/cli/briltag_insertdata.py
|
xiezhen/brilws
|
e3652dd4506dff9d713184ff623b59bc11fbe2c7
|
[
"MIT"
] | 1 |
2017-03-23T16:26:06.000Z
|
2017-03-23T16:26:06.000Z
|
brilws/cli/briltag_insertdata.py
|
xiezhen/brilws
|
e3652dd4506dff9d713184ff623b59bc11fbe2c7
|
[
"MIT"
] | 1 |
2017-03-24T15:02:20.000Z
|
2017-10-02T13:43:26.000Z
|
brilws/cli/briltag_insertdata.py
|
xiezhen/brilws
|
e3652dd4506dff9d713184ff623b59bc11fbe2c7
|
[
"MIT"
] | 1 |
2019-12-06T09:23:01.000Z
|
2019-12-06T09:23:01.000Z
|
"""
Usage:
briltag insertdata [options]
Options:
-h --help Show this screen.
-c CONNECT Service name [default: onlinew]
-p AUTHPATH Authentication file
--name TAGNAME Name of the data tag
--comments COMMENTS Comments on the tag
"""
from docopt import docopt
from schema import Schema
from brilws.cli import clicommonargs
def validate(optdict):
myvalidables = ['-c','-p','--name','--comments',str]
argdict = dict((k,v) for k,v in clicommonargs.argvalidators.items() if k in myvalidables)
s = Schema(argdict)
result = s.validate(optdict)
return result
if __name__ == '__main__':
print (docopt(__doc__,options_first=True))
| 25.689655 | 93 | 0.625503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 365 | 0.489933 |
55b3f38a36b36ad5c48a9910aaae79865f7775ae
| 17,152 |
py
|
Python
|
techniques/volumerec.py
|
lleonart1984/rendezvous
|
f8f5e73fa1ede7c33d8cf08548bce1475a0cc8da
|
[
"MIT"
] | null | null | null |
techniques/volumerec.py
|
lleonart1984/rendezvous
|
f8f5e73fa1ede7c33d8cf08548bce1475a0cc8da
|
[
"MIT"
] | null | null | null |
techniques/volumerec.py
|
lleonart1984/rendezvous
|
f8f5e73fa1ede7c33d8cf08548bce1475a0cc8da
|
[
"MIT"
] | null | null | null |
from rendering.manager import *
from rendering.scenes import *
from rendering.training import *
import random
import glm
import os
import numpy as np
import math
__VOLUME_RECONSTRUCTION_SHADERS__ = os.path.dirname(__file__)+"/shaders/VR"
compile_shader_sources(__VOLUME_RECONSTRUCTION_SHADERS__)
class RayGenerator(RendererModule):
def __init__(self, device, output_dim: (int, int), mode: int, *args, **kwargs):
self.output_dim = output_dim
self.mode = mode
self.camera_buffer = None
super().__init__(device, *args, **kwargs)
def setup(self):
self.camera_buffer = self.device.create_uniform_buffer(
ProjToWorld=glm.mat4
)
pipeline = self.device.create_compute_pipeline()
pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__+"/raygen.comp.spv")
pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.pipeline.rays)
pipeline.bind_uniform(1, ShaderStage.COMPUTE, lambda: self.camera_buffer)
pipeline.bind_constants(
0, ShaderStage.COMPUTE,
dim=glm.ivec2,
mode=int,
seed=int
)
pipeline.close()
self.pipeline = pipeline
def forward_render(self, inputs):
origins, targets = inputs
origins = origins.reshape(-1, 3)
targets = targets.reshape(-1, 3)
full_rays = torch.zeros(len(origins) * self.output_dim[0] * self.output_dim[1], 6, device=origins.device)
for i, (o, t) in enumerate(zip(origins, targets)):
self.pipeline.rays = self.wrap_tensor(torch.zeros(self.output_dim[0] * self.output_dim[1], 6, device=origins.device), False)
# Setup camera
proj = glm.perspective(45, self.output_dim[1] / self.output_dim[0], 0.01, 1000)
view = glm.lookAt(glm.vec3(*o), glm.vec3(*t), glm.vec3(0, 1, 0))
proj_to_model = glm.inverse(proj * view)
self.camera_buffer.ProjToWorld = proj_to_model
with self.device.get_compute() as man:
man.set_pipeline(self.pipeline)
man.update_sets(0)
man.update_constants(
ShaderStage.COMPUTE,
dim=glm.ivec2(self.output_dim[1], self.output_dim[0]),
mode=self.mode,
seed=np.random.randint(0, 10000000)
)
man.dispatch_threads_2D(self.output_dim[1], self.output_dim[0])
t = self.get_tensor(self.pipeline.rays)
full_rays[i*self.output_dim[0]*self.output_dim[1]:(i+1)*self.output_dim[0]*self.output_dim[1]] = t
return [full_rays]
class TransmittanceRenderer(RendererModule):
def __init__(self, device, *args, **kwargs):
super().__init__(device, *args, **kwargs)
def setup(self):
self.medium_buffer = self.device.create_uniform_buffer(
scatteringAlbedo=glm.vec3,
density=float,
phase_g=float
)
pipeline = self.device.create_compute_pipeline()
pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + '/forward.comp.spv')
pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.forward_pipeline.grid)
pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.forward_pipeline.rays)
pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, lambda: self.forward_pipeline.transmittances)
pipeline.bind_uniform(3, ShaderStage.COMPUTE, lambda: self.medium_buffer)
pipeline.bind_constants(0, ShaderStage.COMPUTE,
grid_dim=glm.ivec3,
number_of_rays=int
)
pipeline.close()
self.forward_pipeline = pipeline
pipeline = self.device.create_compute_pipeline()
pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + '/backward.comp.spv')
pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.backward_pipeline.grid_gradients)
pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.backward_pipeline.rays)
pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, lambda: self.backward_pipeline.transmittances)
pipeline.bind_storage_buffer(3, ShaderStage.COMPUTE, lambda: self.backward_pipeline.transmittance_gradients)
pipeline.bind_uniform(4, ShaderStage.COMPUTE, lambda: self.medium_buffer)
pipeline.bind_constants(0, ShaderStage.COMPUTE,
grid_dim=glm.ivec3,
number_of_rays=int
)
pipeline.close()
self.backward_pipeline = pipeline
def set_medium(self, scattering_albedo: glm.vec3, density: float, phase_g: float):
self.medium_buffer.scatteringAlbedo = scattering_albedo
self.medium_buffer.density = density
self.medium_buffer.phase_g = phase_g
def forward_render(self, inputs):
rays, grid = inputs
grid_dim = grid.shape
ray_count = torch.numel(rays) // 6
self.forward_pipeline.rays = self.wrap_tensor(rays)
self.forward_pipeline.grid = self.wrap_tensor(grid)
self.forward_pipeline.transmittances = self.wrap_tensor(torch.zeros(ray_count, 3, device=rays.device), False)
with self.device.get_compute() as man:
man.set_pipeline(self.forward_pipeline)
man.update_sets(0)
man.update_constants(ShaderStage.COMPUTE,
grid_dim=glm.ivec3(grid_dim[2], grid_dim[1], grid_dim[0]),
number_of_rays=ray_count
)
man.dispatch_threads_1D(ray_count)
return [self.get_tensor(self.forward_pipeline.transmittances)]
def backward_render(self, inputs, outputs, output_gradients):
rays, grid = inputs
transmittances, = outputs
transmittance_gradients, = output_gradients
grid_dim = grid.shape
ray_count = torch.numel(rays) // 6
self.backward_pipeline.rays = self.wrap_tensor(rays)
self.backward_pipeline.transmittances = self.wrap_tensor(transmittances)
self.backward_pipeline.transmittance_gradients = self.wrap_tensor(transmittance_gradients)
self.backward_pipeline.grid_gradients = self.wrap_tensor(torch.zeros_like(grid))
with self.device.get_compute() as man:
man.set_pipeline(self.backward_pipeline)
man.update_sets(0)
man.update_constants(ShaderStage.COMPUTE,
grid_dim=glm.ivec3(grid_dim[2], grid_dim[1], grid_dim[0]),
number_of_rays=ray_count
)
man.dispatch_threads_1D(ray_count)
return [None, self.get_tensor(self.backward_pipeline.grid_gradients)]
class ResampleGrid(RendererModule):
def __init__(self, device: DeviceManager, output_dim: (int, int, int), *args, **kwargs):
self.output_dim = output_dim
super().__init__(device, *args, **kwargs)
def setup(self):
pipeline = self.device.create_compute_pipeline()
pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + "/resampling.comp.spv")
pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.pipeline.dst_grid)
pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.pipeline.src_grid)
pipeline.bind_constants(0, ShaderStage.COMPUTE,
dst_grid_dim=glm.ivec3, rem0=float,
src_grid_dim=glm.ivec3, rem1=float
)
pipeline.close()
self.pipeline = pipeline
def forward_render(self, inputs: List[torch.Tensor]):
src_grid, = inputs
self.pipeline.src_grid = self.wrap_tensor(src_grid)
self.pipeline.dst_grid = self.wrap_tensor(torch.zeros(self.output_dim, device=src_grid.device))
src_grid_dim = src_grid.shape
dst_grid_dim = self.output_dim
with self.device.get_compute() as man:
man.set_pipeline(self.pipeline)
man.update_sets(0)
man.update_constants(ShaderStage.COMPUTE,
dst_grid_dim=glm.ivec3(dst_grid_dim[2], dst_grid_dim[1], dst_grid_dim[0]),
src_grid_dim=glm.ivec3(src_grid_dim[2], src_grid_dim[1], src_grid_dim[0])
)
            # one thread per destination voxel: x * y * z
            man.dispatch_threads_1D(dst_grid_dim[0] * dst_grid_dim[1] * dst_grid_dim[2])
return [self.get_tensor(self.pipeline.dst_grid)]
class TransmittanceGenerator(Technique):
def __init__(self, grid, output_image):
super().__init__()
self.grid = grid
self.output_image = output_image
self.width, self.height = output_image.width, output_image.height
def __setup__(self):
# rays
self.rays = self.create_buffer(6 * 4 * self.width * self.height,
BufferUsage.STORAGE | BufferUsage.TRANSFER_SRC | BufferUsage.TRANSFER_DST,
MemoryProperty.GPU)
# Transmittance
self.transmittances = self.create_buffer(3 * 4 * self.width * self.height,
BufferUsage.STORAGE | BufferUsage.TRANSFER_SRC | BufferUsage.TRANSFER_DST,
MemoryProperty.GPU)
# camera buffer
self.camera_buffer = self.create_uniform_buffer(
ProjToWorld=glm.mat4
)
# medium properties
self.medium_buffer = self.create_uniform_buffer(
scatteringAlbedo=glm.vec3,
density=float,
phase_g=float
)
pipeline = self.create_compute_pipeline()
pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__+'/generator.comp.spv')
pipeline.bind_storage_image(0, ShaderStage.COMPUTE, lambda: self.output_image)
pipeline.bind_storage_image(1, ShaderStage.COMPUTE, lambda: self.grid)
pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, lambda: self.rays)
pipeline.bind_storage_buffer(3, ShaderStage.COMPUTE, lambda: self.transmittances)
pipeline.bind_uniform(4, ShaderStage.COMPUTE, lambda: self.camera_buffer)
pipeline.bind_uniform(5, ShaderStage.COMPUTE, lambda: self.medium_buffer)
pipeline.close()
self.pipeline = pipeline
self.set_camera(glm.vec3(0,0,-3), glm.vec3(0,0,0))
self.set_medium(glm.vec3(1,1,1), 10, 0.875)
def set_camera(self, look_from: glm.vec3, look_to: glm.vec3):
# Setup camera
proj = glm.perspective(45, self.width / self.height, 0.01, 1000)
view = glm.lookAt(look_from, look_to, glm.vec3(0, 1, 0))
proj_to_model = glm.inverse(proj * view)
self.camera_buffer.ProjToWorld = proj_to_model
def set_medium(self, scattering_albedo: glm.vec3, density: float, phase_g: float):
self.medium_buffer.scatteringAlbedo = scattering_albedo
self.medium_buffer.density = density
self.medium_buffer.phase_g = phase_g
def __dispatch__(self):
with self.get_compute() as man:
man.set_pipeline(self.pipeline)
man.update_sets(0)
man.dispatch_threads_2D(self.width, self.height)
class TransmittanceForward(Technique):
def __init__(self, rays_resolver, grid_dim: (int, int, int), grid_resolver, transmittance_resolver):
super().__init__()
self.rays_resolver = rays_resolver # input
self.grid_resolver = grid_resolver # params
self.transmittance_resolver = transmittance_resolver # output
self.grid_dim = glm.ivec3(grid_dim)
def set_medium(self, scattering_albedo: glm.vec3, density: float, phase_g: float):
self.medium_buffer.scatteringAlbedo = scattering_albedo
self.medium_buffer.density = density
self.medium_buffer.phase_g = phase_g
def __setup__(self):
# medium properties
self.medium_buffer = self.create_uniform_buffer(
scatteringAlbedo=glm.vec3,
density=float,
phase_g=float
)
pipeline = self.create_compute_pipeline()
pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + '/forward.comp.spv')
pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, self.grid_resolver)
pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, self.rays_resolver)
pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, self.transmittance_resolver)
pipeline.bind_uniform(3, ShaderStage.COMPUTE, lambda: self.medium_buffer)
pipeline.bind_constants(0, ShaderStage.COMPUTE,
grid_dim = glm.ivec3,
number_of_rays = int
)
pipeline.close()
self.pipeline = pipeline
self.set_medium(glm.vec3(1, 1, 1), 10, 0.875)
def __dispatch__(self):
rays = self.rays_resolver()
with self.get_compute() as man:
man.set_pipeline(self.pipeline)
man.update_sets(0)
ray_count = rays.size // (4*3*2)
man.update_constants(ShaderStage.COMPUTE,
grid_dim=self.grid_dim,
number_of_rays=ray_count
)
man.dispatch_threads_1D(ray_count)
class TransmittanceBackward(Technique):
def __init__(self, rays, grid_dim, gradient_densities, transmittances, gradient_transmittances):
super().__init__()
self.grid_dim = grid_dim
self.rays = rays # buffer with rays configurations (origin, direction)
self.gradient_densities = gradient_densities # Flatten grid 512x512x512 used as parameters
self.transmittances = transmittances # Float with transmittance for each ray
self.gradient_transmittances = gradient_transmittances
self.pipeline = None
def set_medium(self, scattering_albedo: glm.vec3, density: float, phase_g: float):
self.medium_buffer.scatteringAlbedo = scattering_albedo
self.medium_buffer.density = density
self.medium_buffer.phase_g = phase_g
def __setup__(self):
# medium properties
self.medium_buffer = self.create_uniform_buffer(
scatteringAlbedo=glm.vec3,
density=float,
phase_g=float
)
pipeline = self.create_compute_pipeline()
pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__ + '/backward.comp.spv')
pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.gradient_densities)
pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.rays)
pipeline.bind_storage_buffer(2, ShaderStage.COMPUTE, lambda: self.transmittances)
pipeline.bind_storage_buffer(3, ShaderStage.COMPUTE, lambda: self.gradient_transmittances)
pipeline.bind_uniform(4, ShaderStage.COMPUTE, lambda: self.medium_buffer)
pipeline.bind_constants(0, ShaderStage.COMPUTE,
grid_dim=glm.ivec3,
number_of_rays=int
)
pipeline.close()
self.pipeline = pipeline
self.set_medium(glm.vec3(1, 1, 1), 10, 0.875)
def __dispatch__(self):
with self.get_compute() as man:
man.clear_buffer(self.gradient_densities) # Zero grad
man.set_pipeline(self.pipeline)
man.update_sets(0)
ray_count = self.rays.size // (4 * 3 * 2)
man.update_constants(ShaderStage.COMPUTE,
grid_dim=self.grid_dim,
number_of_rays=ray_count
)
man.dispatch_threads_1D(ray_count)
class UpSampleGrid(Technique):
    def __init__(self):
        super().__init__()
        self.src_grid = None
        self.dst_grid = None
        self.src_grid_dim = glm.ivec3(0, 0, 0)
        self.dst_grid_dim = glm.ivec3(0, 0, 0)
def set_src_grid(self, grid_dim, grid):
self.src_grid = grid
self.src_grid_dim = grid_dim
def set_dst_grid(self, grid_dim, grid):
self.dst_grid = grid
self.dst_grid_dim = grid_dim
def __setup__(self):
pipeline = self.create_compute_pipeline()
pipeline.load_compute_shader(__VOLUME_RECONSTRUCTION_SHADERS__+"/initialize.comp.spv")
pipeline.bind_storage_buffer(0, ShaderStage.COMPUTE, lambda: self.dst_grid)
pipeline.bind_storage_buffer(1, ShaderStage.COMPUTE, lambda: self.src_grid)
pipeline.bind_constants(0, ShaderStage.COMPUTE,
dst_grid_dim=glm.ivec3, rem0=float,
src_grid_dim=glm.ivec3, rem1=float
)
pipeline.close()
self.pipeline = pipeline
def __dispatch__(self):
with self.get_compute() as man:
man.set_pipeline(self.pipeline)
man.update_sets(0)
man.update_constants(ShaderStage.COMPUTE,
dst_grid_dim=self.dst_grid_dim,
src_grid_dim=self.src_grid_dim
)
man.dispatch_threads_1D(self.dst_grid_dim.x * self.dst_grid_dim.y * self.dst_grid_dim.z)
man.gpu_to_cpu(self.dst_grid)
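# Minimal wiring sketch (comment only, not part of the original module).
# `device`, `rays` and `grid` are placeholders assumed to come from
# rendering.manager and torch respectively.
#
#     renderer = TransmittanceRenderer(device)
#     renderer.set_medium(glm.vec3(1, 1, 1), density=10.0, phase_g=0.875)
#     transmittances, = renderer.forward_render([rays, grid])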
| 45.983914 | 136 | 0.65007 | 16,832 | 0.981343 | 0 | 0 | 0 | 0 | 0 | 0 | 466 | 0.027169 |
55b6264d004418dd7f3a7bb277c12e4c208f7910
| 868 |
py
|
Python
|
basics/merge_sort.py
|
zi-NaN/algorithm_exercise
|
817916a62774145fe6387b715f76c5badbf99197
|
[
"MIT"
] | null | null | null |
basics/merge_sort.py
|
zi-NaN/algorithm_exercise
|
817916a62774145fe6387b715f76c5badbf99197
|
[
"MIT"
] | null | null | null |
basics/merge_sort.py
|
zi-NaN/algorithm_exercise
|
817916a62774145fe6387b715f76c5badbf99197
|
[
"MIT"
] | 1 |
2018-11-21T05:14:07.000Z
|
2018-11-21T05:14:07.000Z
|
def _merge_sort(arr:'list'):
if len(arr) <= 1:
return arr
begin = 0
end = len(arr)-1
middle = (begin+end)//2
first = _merge_sort(arr[begin:middle+1])
second = _merge_sort(arr[middle+1:end+1])
# merge
ptr1 = begin
ptr2 = middle+1
ptr = 0
while(ptr1<middle+1 and ptr2<end+1):
if first[ptr1] < second[ptr2-middle-1]:
arr[ptr] = first[ptr1]
ptr1 += 1
else:
arr[ptr] = second[ptr2-middle-1]
ptr2 += 1
ptr += 1
while(ptr1 < middle+1):
arr[ptr] = first[ptr1]
ptr1 += 1
ptr += 1
while(ptr2 < end+1):
arr[ptr] = second[ptr2-middle-1]
ptr2 += 1
ptr += 1
return arr
# test
if __name__ == '__main__':
print(_merge_sort([1, 3, 2]))
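# Editor's note (added): cross-check _merge_sort against the built-in sorted()
# on random inputs; pure stdlib, safe to run alongside the demo above.
def _check_merge_sort(trials=100):
    import random
    for _ in range(trials):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 30))]
        assert _merge_sort(list(data)) == sorted(data)
    print('merge sort matches sorted() on', trials, 'random lists')
if __name__ == '__main__':
    _check_merge_sort()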
| 24.111111 | 48 | 0.483871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.058756 |
55b6ea1d5523af9cb10562cdce01d07f5fcf19a0
| 2,605 |
py
|
Python
|
main.py
|
famaxth/Amazon-Parser
|
efc236459f2c9d723e02c87e5ebd3b1cf5a09e58
|
[
"MIT"
] | null | null | null |
main.py
|
famaxth/Amazon-Parser
|
efc236459f2c9d723e02c87e5ebd3b1cf5a09e58
|
[
"MIT"
] | null | null | null |
main.py
|
famaxth/Amazon-Parser
|
efc236459f2c9d723e02c87e5ebd3b1cf5a09e58
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
options = Options()
options.headless = True
path = 'path/to/chromedriver.exe' # You need to change this
def parser():
text = input("Hi! I will help you find information about the item on the Amazon website. Enter the text: \n\n")
    if isinstance(text, str):  # input() always returns str, so this branch always runs
print("\nI have received your request. I'm starting to search...")
try:
driver = webdriver.Chrome(path, chrome_options=options)
driver.get('https://www.amazon.co.uk/')
search = driver.find_element_by_xpath('//*[@id="twotabsearchtextbox"]')
search.send_keys(text)
time.sleep(2)
search.send_keys(Keys.ENTER)
try:
title = driver.find_element_by_xpath('//*[@id="search"]/div[1]/div/div[1]/div/span[3]/div[2]/div[1]/div/span/div/div/div/div/div[2]/div[2]/div/div/div[1]/h2/a/span')
price_full = driver.find_element_by_xpath('//*[@id="search"]/div[1]/div/div[1]/div/span[3]/div[2]/div[1]/div/span/div/div/div/div/div[2]/div[2]/div/div/div[3]/div[1]/div/div[1]/div/a/span/span[2]/span[2]')
price_part = driver.find_element_by_xpath('//*[@id="search"]/div[1]/div/div[1]/div/span[3]/div[2]/div[1]/div/span/div/div/div/div/div[2]/div[2]/div/div/div[3]/div[1]/div/div[1]/div/a/span/span[2]/span[3]')
print("ᅠ")
print(f"<<The first item on the site for your request>>\n\nName: {title.text}\nPrice: {price_full.text}.{price_part.text} £\n\n")
except:
title = driver.find_element_by_xpath('//*[@id="search"]/div[1]/div/div[1]/div/span[3]/div[2]/div[2]/div/span/div/div/div/div/div[2]/div[1]/h2/a/span')
price_full = driver.find_element_by_xpath('//*[@id="search"]/div[1]/div/div[1]/div/span[3]/div[2]/div[2]/div/span/div/div/div/div/div[2]/div[3]/div/a/span[1]/span[2]/span[2]')
price_part = driver.find_element_by_xpath('//*[@id="search"]/div[1]/div/div[1]/div/span[3]/div[2]/div[2]/div/span/div/div/div/div/div[2]/div[3]/div/a/span[1]/span[2]/span[3]')
print("ᅠ")
print(f"<<The first item on the site for your request>>\n\nName: {title.text}\nPrice: {price_full.text}.{price_part.text} £\n\n")
        except Exception as e:
            print(f"Error! Nothing was found. ({e})")
else:
print("Error! The input value must be of the string type.")
parser()
| 56.630435 | 222 | 0.603839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,456 | 0.557641 |
55b7410f25633189b2b806b878e6eeb2f52c7ecc
| 679 |
py
|
Python
|
Data_Science/Python-Estatistica/stats-ex8.py
|
maledicente/cursos
|
00ace48da7e48b04485e4ca97b3ca9ba5f33a283
|
[
"MIT"
] | 1 |
2021-05-03T22:59:38.000Z
|
2021-05-03T22:59:38.000Z
|
Data_Science/Python-Estatistica/stats-ex8.py
|
maledicente/cursos
|
00ace48da7e48b04485e4ca97b3ca9ba5f33a283
|
[
"MIT"
] | null | null | null |
Data_Science/Python-Estatistica/stats-ex8.py
|
maledicente/cursos
|
00ace48da7e48b04485e4ca97b3ca9ba5f33a283
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def cinematica(t,s0,v0,a):
s = s0 + v0*t +(a*t*t/2.0)
return s
t = np.linspace(0, 5, 500)
s0 = 0.5
v0 = 2.0
a = 1.5
s_noise = 0.5 * np.random.normal(size=t.size)
s = cinematica(t,s0,v0,a)
sdata = s + s_noise
coefs, pcov = curve_fit(cinematica, t, sdata)
plt.plot(t, sdata, 'b-', label='Displacement')
plt.plot(t, cinematica(t, *coefs), 'r-', label='Fitted curve')
plt.xlabel('Time')
plt.ylabel('Displacement')
plt.title('Curve fitting')
plt.legend()
plt.show()
print("Initial position= %f" %coefs[0])
print("Initial velocity= %f" %coefs[1])
print("Acceleration= %f" %coefs[2])
| 20.575758 | 64 | 0.673049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.207602 |
55b9023ec88372bc40c1756a9431095fe3d52bb6
| 1,059 |
py
|
Python
|
xgboost_model.py
|
aravindpadman/Riiid-Answer-Correctness-Prediction
|
127037d372352af969fbfa335bff8bad84afb603
|
[
"MIT"
] | null | null | null |
xgboost_model.py
|
aravindpadman/Riiid-Answer-Correctness-Prediction
|
127037d372352af969fbfa335bff8bad84afb603
|
[
"MIT"
] | null | null | null |
xgboost_model.py
|
aravindpadman/Riiid-Answer-Correctness-Prediction
|
127037d372352af969fbfa335bff8bad84afb603
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import optuna
import xgboost
train = pd.read_csv("~/kaggledatasets/riiid-test-answer-prediction/train.csv", nrows=3e6,
dtype={'row_id': 'int64',
'timestamp': 'int64',
'user_id': 'int32',
'content_id': 'int16',
'content_type_id': 'int8',
'task_container_id': 'int16',
'user_answer': 'int8',
'answered_correctly': 'int8',
'prior_question_elapsed_time': 'float64',
'prior_question_had_explanation': 'boolean'},
)
class DataPipeline:
    """Feature-engineering pipeline skeleton; fit/transform are stubs."""
    def __init__(self):
        self.is_fitted = False
    def fit(self, X, y=None):
        # TODO: compute per-user / per-content statistics here
        self.is_fitted = True
        raise NotImplementedError
    def transform(self, X, y=None):
        # TODO: return the engineered feature frame once fit() succeeds
        if self.is_fitted:
            return 1
else:
pass
def func(self):
pass
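# Editor's note: a hedged sketch (added; not in the original file) of one
# feature a completed fit/transform could produce here -- per-user historical
# accuracy. Column names come from the dtypes above; in the Riiid data,
# content_type_id == 0 marks question rows. The aggregation choice itself is
# an illustration, not the author's design.
questions = train[train['content_type_id'] == 0]
user_acc = (questions.groupby('user_id')['answered_correctly']
            .agg(['mean', 'count'])
            .rename(columns={'mean': 'user_accuracy', 'count': 'user_attempts'}))
print(user_acc.head())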
| 28.621622 | 90 | 0.481586 | 331 | 0.312559 | 0 | 0 | 0 | 0 | 0 | 0 | 298 | 0.281398 |
55b93809c23b2f231b7acf1f7f0608d40af2f69c
| 1,828 |
py
|
Python
|
run.py
|
Gandor26/covid-open
|
50dcb773160edc16b107785a6bb32ae6f82fc9a7
|
[
"MIT"
] | 12 |
2020-10-29T20:52:26.000Z
|
2021-11-10T14:11:59.000Z
|
run.py
|
Gandor26/covid-open
|
50dcb773160edc16b107785a6bb32ae6f82fc9a7
|
[
"MIT"
] | 1 |
2021-02-16T09:48:39.000Z
|
2021-03-20T04:21:54.000Z
|
run.py
|
Gandor26/covid-open
|
50dcb773160edc16b107785a6bb32ae6f82fc9a7
|
[
"MIT"
] | 1 |
2020-12-05T15:51:43.000Z
|
2020-12-05T15:51:43.000Z
|
from typing import Optional, Dict
from pathlib import Path
from copy import deepcopy
from tqdm import tqdm
import torch as pt
from torch import Tensor, nn
from torch.optim import Adam
def train(
train_data: Dict[str, Tensor],
valid_data: Dict[str, Tensor],
model: nn.Module,
optimizer: Adam,
model_path: Path,
n_epochs: int,
test_size: Optional[int] = None,
log_step: int = 10,
patience: int = 10,
) -> None:
prog_bar = tqdm(total=n_epochs, unit='epoch')
best_valid = float('inf')
stop_counter = patience
for epoch in range(n_epochs):
prog_bar.update()
model = model.train()
loss_train, _ = model(**train_data, test_size=test_size)
optimizer.zero_grad()
loss_train.backward()
optimizer.step()
postfix = {'train_loss': loss_train.item()}
if (epoch+1) % log_step == 0:
if valid_data is not None:
model = model.eval()
with pt.no_grad():
loss_valid, _ = model(**valid_data)
loss_valid = loss_valid.item()
postfix['valid_loss'] = loss_valid
if loss_valid < best_valid:
best_valid = loss_valid
stop_counter = patience
else:
stop_counter -= 1
if stop_counter == 0:
break
prog_bar.set_postfix(**postfix)
prog_bar.close()
    # note: this saves the weights at loop exit, not the best-validation snapshot
    pt.save(model.state_dict(), model_path)
def inference(
data: Dict[str, Tensor],
model: nn.Module,
model_path: Path,
):
model.load_state_dict(pt.load(model_path))
model = model.eval()
with pt.no_grad():
_, pr = model(**data, test_size=0)
pr = pr.clamp_min_(0.0)
return pr
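# Editor's note: a minimal usage sketch (added) of the train/inference pair.
# ToyModel is a placeholder; any nn.Module whose forward(**data, test_size=...)
# returns a (loss, prediction) pair satisfies the contract assumed above.
if __name__ == '__main__':
    class ToyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.w = nn.Parameter(pt.zeros(1))
        def forward(self, x, test_size=None):
            pr = x * pt.exp(self.w)
            return ((pr - x) ** 2).mean(), pr
    data = {'x': pt.rand(8)}
    model = ToyModel()
    train(data, data, model, Adam(model.parameters()), Path('toy.pt'),
          n_epochs=20)
    print(inference(data, model, Path('toy.pt')))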
| 29.967213 | 64 | 0.565646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.019694 |
55b9f31d49258d834824cb0904941fbaf15740b7
| 898 |
py
|
Python
|
authors/apps/profiles/models.py
|
andela/ah-backend-odin
|
0e9ef1a10c8a3f6736999a5111736f7bd7236689
|
[
"BSD-3-Clause"
] | null | null | null |
authors/apps/profiles/models.py
|
andela/ah-backend-odin
|
0e9ef1a10c8a3f6736999a5111736f7bd7236689
|
[
"BSD-3-Clause"
] | 43 |
2018-10-25T10:14:52.000Z
|
2022-03-11T23:33:46.000Z
|
authors/apps/profiles/models.py
|
andela/ah-backend-odin
|
0e9ef1a10c8a3f6736999a5111736f7bd7236689
|
[
"BSD-3-Clause"
] | 4 |
2018-10-29T07:04:58.000Z
|
2020-04-02T14:15:10.000Z
|
from django.db import models
from django.conf import settings
from django.db.models.signals import post_save
class Profile(models.Model):
username = models.CharField(max_length=255, primary_key=True)
bio = models.TextField(null=True, blank=True)
image = models.URLField(
null=True,
blank=True
)
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.username
def user_was_created(sender, instance, created, **kwargs):
    """Listen for when a user is created and create a profile."""
    if created:
        Profile.objects.create(
            user=instance, username=instance.username
        )
post_save.connect(user_was_created, sender=settings.AUTH_USER_MODEL)
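# Editor's note: an added illustration of the signal above -- creating a user
# auto-creates its Profile. It assumes a configured Django project (run it in
# `manage.py shell` or a TestCase), so it is left commented out here.
# from django.contrib.auth import get_user_model
# user = get_user_model().objects.create_user(username='alice', password='x')
# assert Profile.objects.filter(username='alice').exists()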
| 26.411765 | 68 | 0.711581 | 492 | 0.547884 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.066815 |
55bb1301f3cfe948295e5ac6f60a5f73e88c2c17
| 975 |
py
|
Python
|
python/StatsUtil.py
|
cbaldassano/Parcellating-connectivity
|
a98142a6b0dc10e9cb6f6e603cb5334996d018ec
|
[
"Unlicense"
] | 2 |
2020-08-17T21:06:28.000Z
|
2021-05-10T14:37:16.000Z
|
python/StatsUtil.py
|
cbaldassano/Parcellating-connectivity
|
a98142a6b0dc10e9cb6f6e603cb5334996d018ec
|
[
"Unlicense"
] | null | null | null |
python/StatsUtil.py
|
cbaldassano/Parcellating-connectivity
|
a98142a6b0dc10e9cb6f6e603cb5334996d018ec
|
[
"Unlicense"
] | 3 |
2018-07-06T17:08:47.000Z
|
2019-10-09T18:58:31.000Z
|
import numpy as np
# Compute normalized mutual information between two parcellations z1 and z2
def NMI(z1, z2):
N = len(z1)
assert N == len(z2)
p1 = np.bincount(z1)/N
p1[p1 == 0] = 1
H1 = (-p1*np.log(p1)).sum()
p2 = np.bincount(z2)/N
p2[p2 == 0] = 1
H2 = (-p2*np.log(p2)).sum()
    joint = np.histogram2d(z1,z2,[range(0,z1.max()+2), range(0,z2.max()+2)],
                           density=True)  # 'normed' was removed in NumPy >= 1.24
joint_p = joint[0]
pdiv = joint_p/np.outer(p1,p2)
pdiv[joint_p == 0] = 1
MI = (joint_p*np.log(pdiv)).sum()
if MI == 0:
NMI = 0
else:
NMI = MI/np.sqrt(H1*H2)
return NMI
# (Approximately) return whether an array is symmetric
def CheckSymApprox(D):
# Random indices to check for symmetry
sym_sub = np.random.randint(D.shape[0], size=(1000,2))
a = np.ravel_multi_index((sym_sub[:,0],sym_sub[:,1]), dims=np.shape(D))
b = np.ravel_multi_index((sym_sub[:,1],sym_sub[:,0]), dims=np.shape(D))
sym = np.all(D.flat[a] == D.flat[b])
return sym
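# Editor's note: a small self-check (added) of NMI's boundary behaviour --
# identical parcellations score 1, while randomly permuting labels across
# nodes drives the score toward 0. Uses only numpy and the function above.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    z = rng.randint(0, 5, size=10000)
    print('NMI(z, z) =', NMI(z, z)) # 1.0
    print('NMI(z, shuffled) =', NMI(z, rng.permutation(z))) # near 0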
| 24.375 | 75 | 0.610256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.171282 |
55bb525b00d7081596041b440b9ccf7eb9668e9b
| 31,939 |
py
|
Python
|
tests/test_model.py
|
olzama/xigt
|
60daa7201258ec02330264317e7a2315d929bd86
|
[
"MIT"
] | 17 |
2017-01-14T23:29:07.000Z
|
2022-02-23T08:50:09.000Z
|
tests/test_model.py
|
olzama/xigt
|
60daa7201258ec02330264317e7a2315d929bd86
|
[
"MIT"
] | 31 |
2015-02-11T17:25:59.000Z
|
2015-12-07T21:04:39.000Z
|
tests/test_model.py
|
olzama/xigt
|
60daa7201258ec02330264317e7a2315d929bd86
|
[
"MIT"
] | 4 |
2018-02-04T17:21:53.000Z
|
2021-11-29T16:33:45.000Z
|
import pytest
from xigt import XigtCorpus, Igt, Tier, Item, Metadata, Meta, MetaChild
from xigt.errors import XigtError, XigtStructureError
class TestMetadata():
md1 = Metadata()
m1 = Meta(id='meta1', text='meta')
md2 = Metadata(
id='md2',
type='basic',
attributes={'attr':'val'},
metas=[m1]
)
def test_init(self):
with pytest.raises(ValueError): Metadata(id='1') # invalid id
def test_id(self):
assert self.md1.id is None
        assert self.md2.id == 'md2'  # '==': 'is' on a str literal only passes via interning
def test_type(self):
assert self.md1.type is None
assert self.md2.type == 'basic'
def test_metas(self):
assert self.md1.metas == []
assert len(self.md2.metas) == 1
assert self.md2[0].text == 'meta'
def test_attributes(self):
assert self.md1.attributes == dict()
assert self.md2.attributes == {'attr':'val'}
def test_eq(self):
assert self.md1 == self.md1
assert self.md2 == self.md2
assert self.md1 != self.md2
def test_getitem(self):
assert self.md2[0] == self.m1
assert self.md2['meta1'] == self.m1
assert self.md2['0'] == self.m1
with pytest.raises(IndexError): self.md2[1]
with pytest.raises(IndexError): self.md2['1']
with pytest.raises(KeyError): self.md2['m2']
def test_setitem(self):
md = Metadata(metas=[Meta(id='meta1'), Meta(id='meta2')])
md[0] = Meta(id='meta3')
assert len(md) == 2
assert md[0].id == 'meta3'
with pytest.raises(KeyError): md['meta1']
with pytest.raises(ValueError): md['meta2'] = Meta(id='meta2')
def test_delitem(self):
md = Metadata(metas=[Meta(id='meta1'), Meta(id='meta2')])
assert len(md) == 2
del md[0]
assert len(md) == 1
assert md[0].id == 'meta2'
with pytest.raises(KeyError): md['meta1']
del md['meta2']
assert len(md) == 0
with pytest.raises(KeyError): md['meta2']
def test_get(self):
assert self.md1.get(0) is None
assert self.md1.get('meta1') is None
assert self.md1.get('meta1', default=1) == 1
assert self.md2.get(0).id == 'meta1'
assert self.md2.get(1) is None
assert self.md2.get('meta1').id == 'meta1'
assert self.md2.get('meta1', default=Meta(id='meta2')).id == 'meta1'
def test_append(self):
md = Metadata()
with pytest.raises(XigtStructureError): md.append(Item())
with pytest.raises(XigtStructureError): md.append(Tier())
with pytest.raises(XigtStructureError): md.append(Igt())
with pytest.raises(XigtStructureError): md.append(XigtCorpus())
with pytest.raises(XigtStructureError): md.append(Metadata())
assert len(md) == 0
md.append(Meta(id='meta1'))
assert len(md) == 1
with pytest.raises(XigtError): md.append(Meta(id='meta1'))
md.append(Meta(id='meta2'))
assert len(md) == 2
assert md[0].id == 'meta1'
assert md[1].id == 'meta2'
def test_insert(self):
md = Metadata()
assert len(md) == 0
md.insert(0, Meta(id='meta1'))
assert len(md) == 1
with pytest.raises(XigtError): md.insert(0, Meta(id='meta1'))
md.insert(0, Meta(id='meta2'))
md.insert(100, Meta(id='meta3'))
assert len(md) == 3
assert md[0].id == 'meta2'
assert md[1].id == 'meta1'
assert md[2].id == 'meta3'
def test_extend(self):
md = Metadata()
assert len(md) == 0
md.extend([Meta(id='meta1')])
assert len(md) == 1
md.extend([])
assert len(md) == 1
md.extend([Meta(id='meta2'), Meta(id='meta3')])
assert len(md) == 3
assert md[0].id == 'meta1'
assert md[1].id == 'meta2'
assert md[2].id == 'meta3'
def test_remove(self):
md = Metadata(metas=[Meta(id='m1'), Meta(id='m2')])
assert len(md) == 2
md.remove(md[0])
assert len(md) == 1
assert md[0].id == 'm2'
with pytest.raises(KeyError): md['m1']
def test_clear(self):
md = Metadata()
md.extend([Meta(id='meta1'), Meta(id='meta2'), Meta(id='meta3')])
assert len(md) == 3
md.clear()
assert len(md) == 0
assert md.get(0) is None
assert md.get('meta1') is None
def test_get_attribute(self):
md = Metadata(
attributes={'one': 1, 'two': 2, '{http://namespace.org}three': 4},
nsmap={'pre': 'http://namespace.org'}
)
igt = Igt(metadata=[md], attributes={'three': 3})
assert md.get_attribute('one') == 1
assert md.get_attribute('two') == 2
assert md.get_attribute('three') is None
assert md.get_attribute('three', namespace='http://namespace.org') == 4
assert md.get_attribute('three', namespace='pre') == 4
assert md.get_attribute('three', inherit=True) == 3
assert md.get_attribute('three', namespace='pre', inherit=True) == 4
assert md.get_attribute('three', default=5) == 5
class TestMeta():
m1 = Meta()
m2 = Meta(
id='meta1',
type='metatype',
attributes={'one': 1},
text='metatext',
children=[MetaChild('child1'), MetaChild('child2')]
)
def test_init(self):
with pytest.raises(ValueError): Meta(id='1') # invalid id
def test_id(self):
assert self.m1.id is None
assert self.m2.id == 'meta1'
def test_type(self):
assert self.m1.type is None
assert self.m2.type == 'metatype'
def test_attributes(self):
assert self.m1.attributes == dict()
assert self.m2.attributes == {'one': 1}
def test_get_attribute(self):
assert self.m1.get_attribute('attr') is None
assert self.m1.get_attribute('attr', 1) == 1
assert self.m2.get_attribute('one') == 1
assert self.m2.get_attribute('two') is None
m = Meta(attributes={'one': 1})
md = Metadata(
attributes={'two': 2},
metas=[m]
)
assert m.get_attribute('two', inherit=True) == 2
def test_eq(self):
assert self.m1 == self.m1
assert self.m2 == self.m2
assert self.m1 != self.m2
def test_text(self):
assert self.m1.text is None
assert self.m2.text == 'metatext'
def test_children(self):
assert self.m1.children == []
assert len(self.m2.children) == 2
assert self.m2.children[0].name == 'child1'
assert self.m2.children[1].name == 'child2'
class TestMetaChild():
mc1 = MetaChild('childname')
mc2 = MetaChild(
'childname',
attributes={'id': 'mc2', 'type': 'childtype', 'one': 1},
text='childtext',
children=[MetaChild('grandchild1'), MetaChild('grandchild2')]
)
def test_init(self):
# name (i.e. tag in XML) is mandatory
with pytest.raises(TypeError): MetaChild()
# invalid names
with pytest.raises(ValueError): MetaChild('1')
with pytest.raises(ValueError): MetaChild('a:1')
# id and type not allowed as parameters (they can be attributes)
with pytest.raises(TypeError): MetaChild('mc0', id='mc1')
with pytest.raises(TypeError): MetaChild('mc0', type='childtype')
def test_name(self):
assert self.mc1.name == 'childname'
assert self.mc2.name == 'childname'
def test_attributes(self):
assert self.mc1.attributes == dict()
assert self.mc2.attributes == {'id': 'mc2', 'type': 'childtype', 'one': 1}
def test_get_attribute(self):
assert self.mc1.get_attribute('id') is None
assert self.mc1.get_attribute('attr') is None
assert self.mc1.get_attribute('attr', 1) == 1
assert self.mc2.get_attribute('id') == 'mc2'
assert self.mc2.get_attribute('type') == 'childtype'
assert self.mc2.get_attribute('one') == 1
assert self.mc2.get_attribute('two') is None
mc = MetaChild('childname', attributes={'one': 1})
m = Meta(children=[mc])
md = Metadata(
attributes={'two': 2},
metas=[m]
)
assert mc.get_attribute('two', inherit=True) == 2
def test_eq(self):
assert self.mc1 == self.mc1
assert self.mc2 == self.mc2
assert self.mc1 != self.mc2
def test_text(self):
assert self.mc1.text is None
assert self.mc2.text == 'childtext'
def test_children(self):
assert self.mc1.children == []
assert len(self.mc2.children) == 2
assert self.mc2.children[0].name == 'grandchild1'
assert self.mc2.children[1].name == 'grandchild2'
class TestItem():
# empty
i1 = Item()
# basic info
i2 = Item(
id='i2',
type='basic',
attributes={'attr':'val'},
text='text'
)
# alignment and content refs
i_ac = Item(
id='i_ac',
alignment='i2',
content='i2[0:2]'
)
# segmentation ref
i_s = Item(
id='i_s',
segmentation='i2[2:4]'
)
# override content ref with text
i_t = Item(
id='i_t',
content='i2',
text='something else'
)
# contextual structure
t_a = Tier(id='t_a', items=[i2])
t_b = Tier(id='t_b', items=[i_ac, i_t],
alignment='t_a', content='t_a')
t_c = Tier(id='t_c', items=[i_s], segmentation='t_a')
igt = Igt(tiers=[t_a, t_b, t_c])
xc = XigtCorpus(igts=[igt])
def test_init(self):
with pytest.raises(ValueError): Item(id='1') # invalid id
def test_id(self):
assert self.i1.id is None
assert self.i2.id == 'i2'
assert self.i_ac.id == 'i_ac'
assert self.i_s.id == 'i_s'
assert self.i_t.id == 'i_t'
def test_type(self):
assert self.i1.type is None
assert self.i2.type == 'basic'
assert self.i_ac.type is None
assert self.i_s.type is None
assert self.i_t.type is None
def test_parents(self):
assert self.i1.tier is None
assert self.i1.igt is None
assert self.i1.corpus is None
assert self.i2.tier is self.t_a
assert self.i2.igt is self.igt
assert self.i2.corpus is self.xc
assert self.i_ac.tier == self.t_b
assert self.i_ac.igt == self.igt
assert self.i_ac.corpus == self.xc
assert self.i_s.tier == self.t_c
assert self.i_s.igt == self.igt
assert self.i_s.corpus == self.xc
assert self.i_t.tier == self.t_b
assert self.i_t.igt == self.igt
assert self.i_t.corpus == self.xc
def test_eq(self):
assert self.i1 == self.i1
assert self.i2 == self.i2
assert self.i1 != self.i2
def test_attributes(self):
assert self.i1.attributes == dict()
assert self.i2.attributes == {'attr':'val'}
assert self.i_ac.attributes == {'alignment': 'i2', 'content': 'i2[0:2]'}
assert self.i_s.attributes == {'segmentation': 'i2[2:4]'}
assert self.i_t.attributes == {'content': 'i2'}
def test_reference_attributes(self):
# segmentation cannot co-occur with alignment or content
with pytest.raises(XigtError): Item(alignment='a1', segmentation='b1')
with pytest.raises(XigtError): Item(content='a1', segmentation='b1')
assert self.i1.alignment is None
assert self.i1.content is None
assert self.i1.segmentation is None
assert self.i2.alignment is None
assert self.i2.content is None
assert self.i2.segmentation is None
assert self.i_ac.alignment == 'i2'
assert self.i_ac.content == 'i2[0:2]'
assert self.i_ac.segmentation is None
assert self.i_s.alignment is None
assert self.i_s.content is None
assert self.i_s.segmentation == 'i2[2:4]'
        assert self.i_t.alignment is None
        assert self.i_t.content == 'i2'
        assert self.i_t.segmentation is None
def test_text(self):
assert self.i1.text is None
assert self.i2.text == 'text'
assert self.i_ac.text is None
assert self.i_s.text is None
assert self.i_t.text == 'something else'
def test_value(self):
assert self.i1.value() is None
assert self.i2.value() == 'text'
assert self.i_ac.value() == 'te'
assert self.i_s.value() == 'xt'
assert self.i_t.value() == 'something else'
def test_resolve_ref(self):
# item has no reference attribute
b1 = Item(id='b1')
with pytest.raises(KeyError): b1.resolve_ref('alignment')
# has a reference attribute, but is not contained by a tier
b1.alignment = 'a1'
with pytest.raises(XigtStructureError): b1.resolve_ref('alignment')
# item in tier, but tier has no reference attribute
t_b = Tier(id='b', items=[b1])
with pytest.raises(KeyError): b1.resolve_ref('alignment')
# tier has reference attribute, but is not contained by an Igt
t_b.alignment = 'a'
with pytest.raises(XigtStructureError): b1.resolve_ref('alignment')
# item in IGT, but referred tier doesn't exist
igt = Igt(tiers=[t_b])
with pytest.raises(XigtStructureError): b1.resolve_ref('alignment')
# referred tier exists, but has no item referred by item's alignment
t_a = Tier(id='a')
igt.append(t_a)
with pytest.raises(XigtStructureError): b1.resolve_ref('alignment')
# referred item exists, but has no value (which resolves to '')
a1 = Item(id='a1')
t_a.append(a1)
assert b1.resolve_ref('alignment') == ''
# referred item has a value
a1.text = 'text'
assert b1.resolve_ref('alignment') == 'text'
# stored item tests
with pytest.raises(KeyError): self.i1.resolve_ref('alignment')
with pytest.raises(KeyError): self.i2.resolve_ref('alignment')
assert self.i_ac.resolve_ref('alignment') == 'text'
assert self.i_ac.resolve_ref('content') == 'te'
assert self.i_s.resolve_ref('segmentation') == 'xt'
assert self.i_t.resolve_ref('content') == 'text'
def test_span(self):
# sub-spans of null content is also null content
assert self.i1.span(0,1) is None
assert self.i2.span(0,1) == 't'
assert self.i_ac.span(1,2) == 'e'
assert self.i_s.span(1,2) == 't'
assert self.i_t.span(1,2) == 'o'
def test_get_attribute(self):
i = Item(id='i1')
        assert i.get_attribute('attr') is None
        assert i.get_attribute('attr', 1) == 1
        i.attributes['attr'] = 'val'
        assert i.get_attribute('attr', 1) == 'val'
        assert i.get_attribute('abc', inherit=True) is None
        t = Tier(id='t', items=[i], attributes={'abc': 'def'})
        assert i.get_attribute('abc', inherit=True) == 'def'
        assert self.i1.get_attribute('attr') is None
assert self.i1.get_attribute('attr', 1) == 1
assert self.i2.get_attribute('attr') == 'val'
assert self.i2.get_attribute('attr', 1) == 'val'
assert self.i_ac.get_attribute('alignment') == 'i2'
class TestTier():
t1 = Tier()
i1 = Item(id='t1')
i2 = Item(id='t2')
t2 = Tier(
id='t',
type='basic',
attributes={'attr':'val'},
metadata=[Metadata(type='meta', metas=[Meta(text='meta')])],
items=[i1, i2]
)
def test_init(self):
with pytest.raises(ValueError): Tier(id='1') # invalid id
# don't allow multiple items with the same ID
with pytest.raises(XigtError): Tier(items=[Item(id='i1'),
Item(id='i1')])
def test_id(self):
assert self.t1.id is None
assert self.t2.id == 't'
def test_type(self):
assert self.t1.type is None
assert self.t2.type == 'basic'
def test_items(self):
assert len(self.t1.items) == 0
assert self.t1.items == []
assert len(self.t2.items) == 2
# contained Items should now have their tier specified
for i in self.t2.items:
assert i.tier is self.t2
def test_parents(self):
assert self.t1.igt is None
assert self.t1.corpus is None
assert self.t2.igt is None
assert self.t2.corpus is None
def test_metadata(self):
assert len(self.t1.metadata) == 0
assert self.t2.metadata[0].type == 'meta'
assert len(self.t2.metadata[0].metas) == 1
assert self.t2.metadata[0][0].text == 'meta'
def test_attributes(self):
assert self.t1.attributes == dict()
assert self.t2.attributes == {'attr':'val'}
def test_reference_attributes(self):
# segmentation cannot co-occur with alignment or content
with pytest.raises(XigtError): Tier(alignment='a1', segmentation='b1')
with pytest.raises(XigtError): Tier(content='a1', segmentation='b1')
assert self.t1.alignment is None
assert self.t1.content is None
assert self.t1.segmentation is None
assert self.t2.alignment is None
assert self.t2.content is None
assert self.t2.segmentation is None
def test_eq(self):
assert self.t1 == self.t1
assert self.t2 == self.t2
assert self.t1 != self.t2
def test_getitem(self):
assert self.t2[0] == self.i1
assert self.t2['t1'] == self.i1
assert self.t2['0'] == self.i1
assert self.t2[1] == self.i2
with pytest.raises(IndexError): self.t2[2]
with pytest.raises(IndexError): self.t2['2']
with pytest.raises(KeyError): self.t2['t3']
def test_setitem(self):
t = Tier(items=[Item(id='a1'), Item(id='a2')])
t[0] = Item(id='a3')
assert len(t) == 2
assert t[0].id == 'a3'
with pytest.raises(KeyError): t['a1']
with pytest.raises(ValueError): t['a2'] = Item(id='a3')
def test_delitem(self):
t = Tier(items=[Item(id='a1'), Item(id='a2')])
assert len(t) == 2
del t[0]
assert len(t) == 1
assert t[0].id == 'a2'
with pytest.raises(KeyError): t['a1']
del t['a2']
assert len(t) == 0
with pytest.raises(KeyError): t['a2']
def test_get(self):
assert self.t1.get(0) is None
assert self.t1.get('t') is None
assert self.t1.get('t', default=1) == 1
assert self.t2.get(0).id == 't1'
assert self.t2.get(2) is None
assert self.t2.get('t1').id == 't1'
assert self.t2.get('t1', default=Item(id='x')).id == 't1'
def test_append(self):
t = Tier()
with pytest.raises(XigtStructureError): t.append(Tier())
with pytest.raises(XigtStructureError): t.append(Igt())
with pytest.raises(XigtStructureError): t.append(XigtCorpus())
with pytest.raises(XigtStructureError): t.append(Metadata())
with pytest.raises(XigtStructureError): t.append(Meta())
assert len(t) == 0
t.append(Item(id='t1'))
assert len(t) == 1
with pytest.raises(XigtError): t.append(Item(id='t1'))
t.append(Item(id='t2'))
assert len(t) == 2
assert t[0].id == 't1'
assert t[1].id == 't2'
def test_insert(self):
t = Tier()
assert len(t) == 0
t.insert(0, Item(id='t1'))
assert len(t) == 1
with pytest.raises(XigtError): t.insert(0, Item(id='t1'))
t.insert(0, Item(id='t2'))
t.insert(100, Item(id='t3'))
assert len(t) == 3
assert t[0].id == 't2'
assert t[1].id == 't1'
assert t[2].id == 't3'
def test_extend(self):
t = Tier()
assert len(t) == 0
t.extend([Item(id='t1')])
assert len(t) == 1
t.extend([])
assert len(t) == 1
t.extend([Item(id='t2'), Item(id='t3')])
assert len(t) == 3
assert t[0].id == 't1'
assert t[1].id == 't2'
assert t[2].id == 't3'
def test_remove(self):
t = Tier(items=[Item(id='i1'), Item(id='i2')])
assert len(t) == 2
t.remove(t[0])
assert len(t) == 1
assert t[0].id == 'i2'
with pytest.raises(KeyError): t['i1']
def test_clear(self):
t = Tier()
t.extend([Item(id='t1'), Item(id='t2'), Item(id='t3')])
assert len(t) == 3
t.clear()
assert len(t) == 0
assert t.get(0) is None
assert t.get('t1') is None
def test_get_attribute(self):
t = Tier(id='t', attributes={'one': 1, 'two': 2})
igt = Igt(tiers=[t], attributes={'three': 3})
assert t.get_attribute('one') == 1
assert t.get_attribute('two') == 2
assert t.get_attribute('three') is None
assert t.get_attribute('three', inherit=True) == 3
assert t.get_attribute('three', default=4) == 4
class TestIgt():
i1 = Igt()
t1 = Tier(id='a', items=[Item(id='a1'), Item(id='a2')])
t2 = Tier(id='b', items=[Item(id='b1'), Item(id='b2')])
i2 = Igt(
id='i1',
type='basic',
attributes={'attr':'val'},
metadata=[Metadata(type='meta',
metas=[Meta(text='meta')])],
tiers=[t1, t2]
)
def test_init(self):
with pytest.raises(ValueError): Igt(id='1') # invalid id
# don't allow multiple tiers with the same ID
with pytest.raises(XigtError): Igt(tiers=[Tier(id='a'), Tier(id='a')])
def test_id(self):
assert self.i1.id is None
assert self.i2.id == 'i1'
def test_type(self):
assert self.i1.type is None
assert self.i2.type == 'basic'
def test_tiers(self):
assert len(self.i1.tiers) == 0
assert len(self.i2.tiers) == 2
# contained Tiers should now have their igt specified
for t in self.i2.tiers:
assert t.igt is self.i2
def test_parents(self):
assert self.i1.corpus is None
assert self.i2.corpus is None
def test_metadata(self):
assert len(self.i1.metadata) == 0
assert self.i2.metadata[0].type == 'meta'
assert len(self.i2.metadata[0].metas) == 1
assert self.i2.metadata[0][0].text == 'meta'
def test_attributes(self):
assert self.i1.attributes == dict()
assert self.i2.attributes == {'attr':'val'}
def test_eq(self):
assert self.i1 == self.i1
assert self.i2 == self.i2
assert self.i1 != self.i2
def test_getitem(self):
assert self.i2[0] == self.t1
assert self.i2['a'] == self.t1
assert self.i2['0'] == self.t1
assert self.i2[1] == self.t2
with pytest.raises(IndexError): self.i2[2]
with pytest.raises(IndexError): self.i2['2']
with pytest.raises(KeyError): self.i2['c']
def test_setitem(self):
igt = Igt(tiers=[Tier(id='a'), Tier(id='b')])
igt[0] = Tier(id='c')
assert len(igt) == 2
assert igt[0].id == 'c'
with pytest.raises(KeyError): igt['a']
with pytest.raises(ValueError): igt['b'] = Tier(id='d')
def test_delitem(self):
igt = Igt(tiers=[Tier(id='a'), Tier(id='b')])
assert len(igt) == 2
del igt[0]
assert len(igt) == 1
assert igt[0].id == 'b'
with pytest.raises(KeyError): igt['a']
del igt['b']
assert len(igt) == 0
with pytest.raises(KeyError): igt['b']
def test_get(self):
assert self.i1.get(0) is None
assert self.i1.get('t') is None
assert self.i1.get('t', default=1) == 1
assert self.i2.get(0).id == 'a'
assert self.i2.get(3) is None
assert self.i2.get('a').id == 'a'
assert self.i2.get('a', default=Tier(id='x')).id == 'a'
def test_get_item(self):
assert self.i1.get_item('a') is None
assert self.i1.get_item('a1') is None
assert self.i2.get_item('a') is None
assert self.i2.get_item('a1').id == 'a1'
assert self.i2.get_item('b2').id == 'b2'
def test_get_any(self):
assert self.i1.get_any('a') is None
assert self.i1.get_any('a1') is None
        assert self.i2.get_any('a').id == 'a'
assert self.i2.get_any('a1').id == 'a1'
assert self.i2.get_any('b2').id == 'b2'
def test_append(self):
igt = Igt()
with pytest.raises(XigtStructureError): igt.append(Item())
with pytest.raises(XigtStructureError): igt.append(Igt())
with pytest.raises(XigtStructureError): igt.append(XigtCorpus())
with pytest.raises(XigtStructureError): igt.append(Metadata())
with pytest.raises(XigtStructureError): igt.append(Meta())
assert len(igt) == 0
igt.append(Tier(id='t'))
assert len(igt) == 1
with pytest.raises(XigtError): igt.append(Tier(id='t'))
igt.append(Tier(id='x'))
assert len(igt) == 2
assert igt[0].id == 't'
assert igt[1].id == 'x'
def test_insert(self):
igt = Igt()
assert len(igt) == 0
igt.insert(0, Tier(id='t'))
assert len(igt) == 1
with pytest.raises(XigtError): igt.insert(0, Tier(id='t'))
igt.insert(0, Tier(id='x'))
igt.insert(100, Tier(id='y'))
assert len(igt) == 3
assert igt[0].id == 'x'
assert igt[1].id == 't'
assert igt[2].id == 'y'
def test_extend(self):
igt = Igt()
assert len(igt) == 0
igt.extend([Tier(id='t')])
assert len(igt) == 1
igt.extend([])
assert len(igt) == 1
igt.extend([Tier(id='x'), Tier(id='y')])
assert len(igt) == 3
assert igt[0].id == 't'
assert igt[1].id == 'x'
assert igt[2].id == 'y'
def test_remove(self):
igt = Igt(tiers=[Tier(id='a'), Tier(id='b')])
assert len(igt) == 2
igt.remove(igt[0])
assert len(igt) == 1
assert igt[0].id == 'b'
with pytest.raises(KeyError): igt['a']
def test_clear(self):
igt = Igt()
igt.extend([Tier(id='t'), Tier(id='x'), Tier(id='y')])
assert len(igt) == 3
igt.clear()
assert len(igt) == 0
assert igt.get(0) is None
assert igt.get('t') is None
def test_get_attribute(self):
igt = Igt(id='i1', attributes={'one': 1, 'two': 2})
xc = XigtCorpus(igts=[igt], attributes={'three': 3})
assert igt.get_attribute('one') == 1
assert igt.get_attribute('two') == 2
assert igt.get_attribute('three') is None
assert igt.get_attribute('three', inherit=True) == 3
assert igt.get_attribute('three', default=4) == 4
class TestXigtCorpus():
c1 = XigtCorpus()
i1 = Igt(id='i1')
i2 = Igt(id='i2')
c2 = XigtCorpus(
id='xc1',
type='basic',
attributes={'attr':'val'},
metadata=[Metadata(type='meta', metas=[Meta(text='meta')])],
igts=[i1, i2]
)
def test_init(self):
with pytest.raises(ValueError): XigtCorpus(id='1') # invalid id
# don't allow multiple igts with the same ID
with pytest.raises(XigtError): XigtCorpus(igts=[Igt(id='i1'),
Igt(id='i1')])
def test_id(self):
assert self.c1.id is None
assert self.c2.id == 'xc1'
def test_type(self):
assert self.c1.type is None
        assert self.c2.type == 'basic'
def test_igts(self):
assert len(self.c1.igts) == 0
assert len(self.c2.igts) == 2
# contained Igts should now have their corpus specified
for i in self.c2.igts:
assert i.corpus is self.c2
def test_attributes(self):
assert self.c1.attributes == dict()
assert self.c2.attributes == {'attr':'val'}
def test_metadata(self):
assert len(self.c1.metadata) == 0
assert self.c2.metadata[0].type == 'meta'
assert len(self.c2.metadata[0].metas) == 1
assert self.c2.metadata[0][0].text == 'meta'
def test_eq(self):
assert self.c1 == self.c1
assert self.c2 == self.c2
assert self.c1 != self.c2
def test_getitem(self):
assert self.c2[0] == self.i1
assert self.c2['i1'] == self.i1
assert self.c2['0'] == self.i1
assert self.c2[1] == self.i2
with pytest.raises(IndexError): self.c2[2]
with pytest.raises(IndexError): self.c2['2']
with pytest.raises(KeyError): self.c2['i3']
def test_setitem(self):
xc = XigtCorpus(igts=[Igt(id='i1'), Igt(id='i2')])
xc[0] = Igt(id='i3')
assert len(xc) == 2
assert xc[0].id == 'i3'
with pytest.raises(KeyError): xc['i1']
with pytest.raises(ValueError): xc['i2'] = Igt(id='i3')
def test_delitem(self):
xc = XigtCorpus(igts=[Igt(id='i1'), Igt(id='i2')])
assert len(xc) == 2
del xc[0]
assert len(xc) == 1
assert xc[0].id == 'i2'
with pytest.raises(KeyError): xc['i1']
del xc['i2']
assert len(xc) == 0
with pytest.raises(KeyError): xc['i2']
def test_get(self):
assert self.c1.get(0) is None
assert self.c1.get('i1') is None
assert self.c1.get('i1', default=1) == 1
assert self.c2.get(0).id == 'i1'
assert self.c2.get(3) is None
assert self.c2.get('i1').id == 'i1'
assert self.c2.get('i1', default=Igt(id='i3')).id == 'i1'
def test_append(self):
xc = XigtCorpus()
with pytest.raises(XigtStructureError): xc.append(Item())
with pytest.raises(XigtStructureError): xc.append(Tier())
with pytest.raises(XigtStructureError): xc.append(XigtCorpus())
with pytest.raises(XigtStructureError): xc.append(Metadata())
with pytest.raises(XigtStructureError): xc.append(Meta())
assert len(xc) == 0
xc.append(Igt(id='i1'))
assert len(xc) == 1
with pytest.raises(XigtError): xc.append(Igt(id='i1'))
xc.append(Igt(id='i2'))
assert len(xc) == 2
assert xc[0].id == 'i1'
assert xc[1].id == 'i2'
def test_insert(self):
xc = XigtCorpus()
assert len(xc) == 0
xc.insert(0, Igt(id='i1'))
assert len(xc) == 1
with pytest.raises(XigtError): xc.insert(0, Igt(id='i1'))
xc.insert(0, Igt(id='i2'))
xc.insert(100, Igt(id='i3'))
assert len(xc) == 3
assert xc[0].id == 'i2'
assert xc[1].id == 'i1'
assert xc[2].id == 'i3'
def test_extend(self):
xc = XigtCorpus()
assert len(xc) == 0
xc.extend([Igt(id='i1')])
assert len(xc) == 1
xc.extend([])
assert len(xc) == 1
xc.extend([Igt(id='i2'), Igt(id='i3')])
assert len(xc) == 3
assert xc[0].id == 'i1'
assert xc[1].id == 'i2'
assert xc[2].id == 'i3'
def test_remove(self):
xc = XigtCorpus(igts=[Igt(id='i1'), Igt(id='i2')])
assert len(xc) == 2
xc.remove(xc[0])
assert len(xc) == 1
assert xc[0].id == 'i2'
with pytest.raises(KeyError): xc['i1']
def test_clear(self):
xc = XigtCorpus()
xc.extend([Igt(id='i1'), Igt(id='i2'), Igt(id='i3')])
assert len(xc) == 3
xc.clear()
assert len(xc) == 0
assert xc.get(0) is None
assert xc.get('i1') is None
def test_get_attribute(self):
xc = XigtCorpus(attributes={'one': 1, 'two': 2})
assert xc.get_attribute('one') == 1
assert xc.get_attribute('two') == 2
assert xc.get_attribute('three') is None
        assert xc.get_attribute('three', inherit=True) is None
| 31.312745 | 82 | 0.558032 | 31,778 | 0.994959 | 0 | 0 | 0 | 0 | 0 | 0 | 3,868 | 0.121106 |
55bbc7c595e31e90737d59f74df6dbd5b4ab1f77
| 121 |
py
|
Python
|
api_v2/views.py
|
LonelVino/club-chinois-home
|
3e2ecc6728f0b7349adfe10e515e3f5908d09c9d
|
[
"MIT"
] | null | null | null |
api_v2/views.py
|
LonelVino/club-chinois-home
|
3e2ecc6728f0b7349adfe10e515e3f5908d09c9d
|
[
"MIT"
] | null | null | null |
api_v2/views.py
|
LonelVino/club-chinois-home
|
3e2ecc6728f0b7349adfe10e515e3f5908d09c9d
|
[
"MIT"
] | null | null | null |
from django.http import JsonResponse
def names(request):
return JsonResponse({'names': ['William', 'Rod', 'Grant']})
| 30.25 | 63 | 0.702479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.231405 |
55bbcfb0657fa9d696e2cb0dec828c20a4c0e1c7
| 156 |
py
|
Python
|
rpi/LiDAR.py
|
shadowsburney/LiDAR
|
f88cca9fbdae2d0dbe47a6e06cd965a2aaa82a0a
|
[
"MIT"
] | null | null | null |
rpi/LiDAR.py
|
shadowsburney/LiDAR
|
f88cca9fbdae2d0dbe47a6e06cd965a2aaa82a0a
|
[
"MIT"
] | null | null | null |
rpi/LiDAR.py
|
shadowsburney/LiDAR
|
f88cca9fbdae2d0dbe47a6e06cd965a2aaa82a0a
|
[
"MIT"
] | null | null | null |
from sensor import Sensor
from stepper import Stepper
sensor = Sensor()
stepper = Stepper(100)
#stepper.start()
while True:
print(sensor.measure())
| 13 | 27 | 0.730769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.102564 |
55bc6334d6372aec8c3f097cf63d231873013d04
| 1,351 |
py
|
Python
|
peering/migrations/0051_auto_20190818_1816.py
|
schiederme/peering-manager
|
2d29427fd4f2b91a5208f31e1a7ad69eaf82924c
|
[
"Apache-2.0"
] | 173 |
2020-08-08T15:38:08.000Z
|
2022-03-21T11:35:25.000Z
|
peering/migrations/0051_auto_20190818_1816.py
|
schiederme/peering-manager
|
2d29427fd4f2b91a5208f31e1a7ad69eaf82924c
|
[
"Apache-2.0"
] | 247 |
2017-12-26T12:55:34.000Z
|
2020-08-08T11:57:35.000Z
|
peering/migrations/0051_auto_20190818_1816.py
|
schiederme/peering-manager
|
2d29427fd4f2b91a5208f31e1a7ad69eaf82924c
|
[
"Apache-2.0"
] | 63 |
2017-10-13T06:46:05.000Z
|
2020-08-08T00:41:57.000Z
|
# Generated by Django 2.2.4 on 2019-08-18 16:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("peering", "0050_auto_20190806_2159")]
operations = [
migrations.RenameField(
model_name="autonomoussystem", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="bgpgroup", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="community", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="directpeeringsession", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="internetexchange", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="internetexchangepeeringsession",
old_name="comment",
new_name="comments",
),
migrations.RenameField(
model_name="router", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="routingpolicy", old_name="comment", new_name="comments"
),
migrations.RenameField(
model_name="template", old_name="comment", new_name="comments"
),
]
| 32.95122 | 86 | 0.6151 | 1,266 | 0.937084 | 0 | 0 | 0 | 0 | 0 | 0 | 396 | 0.293116 |
55beea09bbe265b3360f6e0c1ea21bb757b756fd
| 7,784 |
py
|
Python
|
pysnmp-with-texts/HP-ICF-IPV6-RA-GUARD-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8 |
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/HP-ICF-IPV6-RA-GUARD-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4 |
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/HP-ICF-IPV6-RA-GUARD-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10 |
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module HP-ICF-IPV6-RA-GUARD-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HP-ICF-IPV6-RA-GUARD-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:34:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
hpSwitch, = mibBuilder.importSymbols("HP-ICF-OID", "hpSwitch")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Counter32, Gauge32, Counter64, IpAddress, TimeTicks, Integer32, iso, Bits, ObjectIdentity, Unsigned32, ModuleIdentity, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Counter32", "Gauge32", "Counter64", "IpAddress", "TimeTicks", "Integer32", "iso", "Bits", "ObjectIdentity", "Unsigned32", "ModuleIdentity", "MibIdentifier")
DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "TruthValue")
hpicfIpv6RAGuard = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87))
hpicfIpv6RAGuard.setRevisions(('2011-03-16 05:24',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hpicfIpv6RAGuard.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts: hpicfIpv6RAGuard.setLastUpdated('201103160524Z')
if mibBuilder.loadTexts: hpicfIpv6RAGuard.setOrganization('Hewlett-Packard Company HP Networking')
if mibBuilder.loadTexts: hpicfIpv6RAGuard.setContactInfo('Hewlett-Packard Company 8000 Foothills Blvd. Roseville, CA 95747')
if mibBuilder.loadTexts: hpicfIpv6RAGuard.setDescription('This MIB module contains HP proprietary objects for managing RA Guard.')
hpicfIpv6RAGuardObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1))
hpicfIpv6RAGuardConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1))
hpicfRAGuardPortTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1), )
if mibBuilder.loadTexts: hpicfRAGuardPortTable.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardPortTable.setDescription('Per-interface configuration for RA Guard. Ra Guard is used to block IPv6 router advertisements and ICMPv6 router redirects. The log option is to enable debug logging for troubleshooting. It uses a lot of CPU and should be used only for short periods of time. To display debug logging, use debug security ra-guard command.')
hpicfRAGuardPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hpicfRAGuardPortEntry.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardPortEntry.setDescription('RA Guard configuration information for a single port.')
hpicfRAGuardPortBlocked = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfRAGuardPortBlocked.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardPortBlocked.setDescription('This object indicates whether this port is blocked for Router Advertisements and Redirects.')
hpicfRAGuardPortBlockedRAs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfRAGuardPortBlockedRAs.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardPortBlockedRAs.setDescription('This number of Router Advertisements blocked for the port.')
hpicfRAGuardPortBlockedRedirs = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfRAGuardPortBlockedRedirs.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardPortBlockedRedirs.setDescription('This number of Router Redirects blocked for the port.')
hpicfRAGuardPortLog = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfRAGuardPortLog.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardPortLog.setDescription('Whether to log RAs and Redirects for the port. The log option is to enable debug logging for troubleshooting. It uses a lot of CPU and should be used only for short periods of time.')
hpicfRAGuardLastErrorCode = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noError", 1), ("insufficientHardwareResources", 2), ("genericError", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfRAGuardLastErrorCode.setStatus('current')
if mibBuilder.loadTexts: hpicfRAGuardLastErrorCode.setDescription('Error code of the last error that occurred. A non-zero value indicates that the last operation performed by this instance did not succeed.')
hpicfIpv6RAGuardConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2))
hpicfIpv6RAGuardCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2, 1))
hpicfIpv6RAGuardGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2, 2))
hpicfIpv6RAGuardGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2, 2, 1)).setObjects(("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardPortBlocked"), ("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardPortBlockedRAs"), ("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardPortBlockedRedirs"), ("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardPortLog"), ("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfRAGuardLastErrorCode"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfIpv6RAGuardGroup = hpicfIpv6RAGuardGroup.setStatus('current')
if mibBuilder.loadTexts: hpicfIpv6RAGuardGroup.setDescription('A collection of objects providing configuration for Ipv6 RA Guard.')
hpicfIpv6RAGuardCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 87, 2, 1, 1)).setObjects(("HP-ICF-IPV6-RA-GUARD-MIB", "hpicfIpv6RAGuardGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfIpv6RAGuardCompliance = hpicfIpv6RAGuardCompliance.setStatus('current')
if mibBuilder.loadTexts: hpicfIpv6RAGuardCompliance.setDescription('The compliance statement for devices support of HP-ICF-IPV6-RA-GUARD-MIB.')
mibBuilder.exportSymbols("HP-ICF-IPV6-RA-GUARD-MIB", hpicfIpv6RAGuardConfig=hpicfIpv6RAGuardConfig, hpicfRAGuardPortLog=hpicfRAGuardPortLog, hpicfIpv6RAGuardCompliances=hpicfIpv6RAGuardCompliances, hpicfIpv6RAGuardGroup=hpicfIpv6RAGuardGroup, hpicfIpv6RAGuardCompliance=hpicfIpv6RAGuardCompliance, hpicfRAGuardPortEntry=hpicfRAGuardPortEntry, hpicfIpv6RAGuardObjects=hpicfIpv6RAGuardObjects, PYSNMP_MODULE_ID=hpicfIpv6RAGuard, hpicfRAGuardPortBlocked=hpicfRAGuardPortBlocked, hpicfRAGuardPortTable=hpicfRAGuardPortTable, hpicfRAGuardPortBlockedRAs=hpicfRAGuardPortBlockedRAs, hpicfRAGuardPortBlockedRedirs=hpicfRAGuardPortBlockedRedirs, hpicfRAGuardLastErrorCode=hpicfRAGuardLastErrorCode, hpicfIpv6RAGuardConformance=hpicfIpv6RAGuardConformance, hpicfIpv6RAGuardGroups=hpicfIpv6RAGuardGroups, hpicfIpv6RAGuard=hpicfIpv6RAGuard)
| 127.606557 | 828 | 0.776208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,776 | 0.356629 |
55bfeb24ff5584cd80bb449c46db4ec74f53fd3c
| 102 |
py
|
Python
|
API/utils/tokenizer.py
|
accordproject/labs-cicero-classify
|
3a52ebaf45252515c417bf94a05e33fc1c2628b8
|
[
"Apache-2.0"
] | 2 |
2021-07-07T01:06:18.000Z
|
2021-11-12T18:54:21.000Z
|
API/utils/tokenizer.py
|
accordproject/labs_cicero_classify
|
3a52ebaf45252515c417bf94a05e33fc1c2628b8
|
[
"Apache-2.0"
] | 3 |
2021-06-25T12:40:23.000Z
|
2022-02-14T13:42:30.000Z
|
API/utils/tokenizer.py
|
accordproject/labs_cicero_classify
|
3a52ebaf45252515c417bf94a05e33fc1c2628b8
|
[
"Apache-2.0"
] | null | null | null |
from transformers import RobertaTokenizer
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
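# Editor's note: minimal usage (added). RoBERTa's byte-level BPE tokenizer
# round-trips text through encode/decode; the ids include <s>...</s> specials.
ids = tokenizer("Contract effective date: 2021-01-01")["input_ids"]
print(ids)
print(tokenizer.decode(ids))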
| 51 | 60 | 0.872549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.137255 |
55c01bcc5785d0af3f6437a91b853450fda2bb63
| 2,531 |
py
|
Python
|
gdesk/panels/imgview/quantiles.py
|
thocoo/gamma-desk
|
9cb63a65fe23e30e155b3beca862f369b7fa1b7e
|
[
"Apache-2.0"
] | null | null | null |
gdesk/panels/imgview/quantiles.py
|
thocoo/gamma-desk
|
9cb63a65fe23e30e155b3beca862f369b7fa1b7e
|
[
"Apache-2.0"
] | 8 |
2021-04-09T11:31:43.000Z
|
2021-06-09T09:07:18.000Z
|
gdesk/panels/imgview/quantiles.py
|
thocoo/gamma-desk
|
9cb63a65fe23e30e155b3beca862f369b7fa1b7e
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from .fasthist import hist2d
stdquant = np.ndarray(13)
stdquant[0] = (0.0000316712418331200) #-4 sdev
stdquant[1] = (0.0013498980316301000) #-3 sdev
stdquant[2] = (0.0227501319481792000) #-2 sdev
stdquant[3] = (0.05)
stdquant[4] = (0.1586552539314570000) #-1 sdev or lsdev
stdquant[5] = (0.25) #first quartile
stdquant[6] = (0.50) #median
stdquant[7] = (0.75) #third quartile
stdquant[8] = (0.8413447460685430000) #+1 sdev or usdev
stdquant[9] = (0.95)
stdquant[10] = (0.9772498680518210000) #+2 sdev
stdquant[11] = (0.9986501019683700000) #+3 sdev
stdquant[12] = (0.9999683287581670000) #+4 sdev
def get_standard_quantiles(arr, bins=64, step=None, quantiles=None):
hist, starts, stepsize = hist2d(arr, bins, step, plot=False)
cumhist = np.cumsum(hist)
    if quantiles is None:
        # copy: the in-place '*=' scaling below must not mutate the module-level table
        quantiles = stdquant.copy()
    else:
        quantiles = np.array(quantiles)
n = len(quantiles)
npix = np.multiply.reduce(arr.shape)
quantiles *= npix
thresh = [0] * n
#TO DO: speed up by using interpolation function of numpy
for ind in range(n):
thresh[ind] = starts[(cumhist < quantiles[ind]).sum()]
return thresh
def get_sigma_range(arr, sigma=1, bins=64, step=None):
if sigma == 1:
return get_standard_quantiles(arr, bins, step, (stdquant[4], stdquant[8]))
elif sigma == 2:
return get_standard_quantiles(arr, bins, step, (stdquant[2], stdquant[10]))
elif sigma == 3:
return get_standard_quantiles(arr, bins, step, (stdquant[1], stdquant[11]))
    elif sigma == 4:
        return get_standard_quantiles(arr, bins, step, (stdquant[0], stdquant[12]))
    else:
        raise ValueError('sigma must be 1, 2, 3 or 4')
def get_sigma_range_for_hist(starts, hist, sigma):
cumhist = np.cumsum(hist)
if sigma==1:
quantiles = np.array((stdquant[4], stdquant[8]))
elif sigma==2:
quantiles = np.array((stdquant[2], stdquant[10]))
elif sigma==3:
quantiles = np.array((stdquant[1], stdquant[11]))
    elif sigma==4:
        quantiles = np.array((stdquant[0], stdquant[12]))
    else:
        raise ValueError('sigma must be 1, 2, 3 or 4')
n = len(quantiles)
npix = cumhist[-1]
quantiles *= npix
thresh = [0] * n
#TO DO: speed up by using interpolation function of numpy
for ind in range(n):
thresh[ind] = starts[(cumhist < quantiles[ind]).sum()]
return thresh
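# Editor's note: a self-contained sanity check (added) of the +/-1 sigma
# levels tabulated in stdquant, using numpy's exact quantiles on Gaussian
# noise; get_sigma_range(arr, 1) approximates the same pair via the histogram.
if __name__ == '__main__':
    arr = np.random.normal(0.0, 1.0, size=(512, 512))
    lo, hi = np.quantile(arr, [stdquant[4], stdquant[8]])
    print('1-sigma range ~ (%.3f, %.3f); expected about (-1, +1)' % (lo, hi))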
| 34.671233 | 83 | 0.590281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.092058 |
55c0577110244c4fafd7e8c73ddb2adb8d710299
| 10,584 |
py
|
Python
|
isi_sdk/models/report_subreport_policy_file_matching_pattern_or_criteria_item_and_criteria_item.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
isi_sdk/models/report_subreport_policy_file_matching_pattern_or_criteria_item_and_criteria_item.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
isi_sdk/models/report_subreport_policy_file_matching_pattern_or_criteria_item_and_criteria_item.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
class ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'attribute_exists': 'bool',
'case_sensitive': 'bool',
'field': 'str',
'operator': 'str',
'type': 'str',
'value': 'str',
'whole_word': 'bool'
}
self.attribute_map = {
'attribute_exists': 'attribute_exists',
'case_sensitive': 'case_sensitive',
'field': 'field',
'operator': 'operator',
'type': 'type',
'value': 'value',
'whole_word': 'whole_word'
}
self._attribute_exists = None
self._case_sensitive = None
self._field = None
self._operator = None
self._type = None
self._value = None
self._whole_word = None
@property
def attribute_exists(self):
"""
Gets the attribute_exists of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
For \"custom_attribute\" type criteria. The file will match as long as the attribute named by \"field\" exists. Default is true.
:return: The attribute_exists of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:rtype: bool
"""
return self._attribute_exists
@attribute_exists.setter
def attribute_exists(self, attribute_exists):
"""
Sets the attribute_exists of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
For \"custom_attribute\" type criteria. The file will match as long as the attribute named by \"field\" exists. Default is true.
:param attribute_exists: The attribute_exists of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:type: bool
"""
self._attribute_exists = attribute_exists
@property
def case_sensitive(self):
"""
Gets the case_sensitive of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
If true, the value comparison will be case sensitive. Default is true.
:return: The case_sensitive of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:rtype: bool
"""
return self._case_sensitive
@case_sensitive.setter
def case_sensitive(self, case_sensitive):
"""
Sets the case_sensitive of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
If true, the value comparison will be case sensitive. Default is true.
:param case_sensitive: The case_sensitive of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:type: bool
"""
self._case_sensitive = case_sensitive
@property
def field(self):
"""
Gets the field of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
The name of the file attribute to match on (only required if this is a custom_attribute type criterion). Default is an empty string \"\".
:return: The field of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:rtype: str
"""
return self._field
@field.setter
def field(self, field):
"""
Sets the field of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
The name of the file attribute to match on (only required if this is a custom_attribute type criterion). Default is an empty string \"\".
:param field: The field of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:type: str
"""
self._field = field
@property
def operator(self):
"""
Gets the operator of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
How to compare the specified attribute of each file to the specified value.
:return: The operator of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:rtype: str
"""
return self._operator
@operator.setter
def operator(self, operator):
"""
Sets the operator of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
How to compare the specified attribute of each file to the specified value.
:param operator: The operator of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:type: str
"""
allowed_values = ["==", "!=", ">", ">=", "<", "<=", "!"]
if operator is not None and operator not in allowed_values:
raise ValueError(
"Invalid value for `operator`, must be one of {0}"
.format(allowed_values)
)
self._operator = operator
@property
def type(self):
"""
Gets the type of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
The type of this criterion, that is, which file attribute to match on.
:return: The type of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
The type of this criterion, that is, which file attribute to match on.
:param type: The type of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:type: str
"""
allowed_values = ["name", "path", "accessed_time", "accessed_before", "accessed_after", "birth_time", "birth_before", "birth_after", "changed_time", "changed_before", "changed_after", "size", "file_type", "posix_regex_name", "user_name", "user_id", "group_name", "group_id", "no_user", "no_group"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type`, must be one of {0}"
.format(allowed_values)
)
self._type = type
@property
def value(self):
"""
Gets the value of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
The value to compare the specified attribute of each file to.
:return: The value of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""
Sets the value of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
The value to compare the specified attribute of each file to.
:param value: The value of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:type: str
"""
self._value = value
@property
def whole_word(self):
"""
Gets the whole_word of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
If true, the attribute must match the entire word. Default is true.
:return: The whole_word of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:rtype: bool
"""
return self._whole_word
@whole_word.setter
def whole_word(self, whole_word):
"""
Sets the whole_word of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
If true, the attribute must match the entire word. Default is true.
:param whole_word: The whole_word of this ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem.
:type: bool
"""
self._whole_word = whole_word
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
        if not isinstance(other, ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem):
            return False
        return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
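# --- Usage sketch (not part of the generated module): builds a simple "name"
# criterion and serializes it. Assumes the module-level imports that swagger
# codegen places at the top of this file (e.g. iteritems, pformat).
def _example_criteria_item():
    item = ReportSubreportPolicyFileMatchingPatternOrCriteriaItemAndCriteriaItem()
    item.type = 'name'         # validated against allowed_values above
    item.operator = '=='
    item.value = 'report.pdf'  # hypothetical file name
    item.case_sensitive = False
    return item.to_dict()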
| 36.371134 | 305 | 0.647865 | 9,839 | 0.929611 | 0 | 0 | 6,957 | 0.657313 | 0 | 0 | 7,083 | 0.669218 |
55c0c3ecc4384f35e0ec61e90038c58f6fa656b9
| 89 |
py
|
Python
|
languages/116/examples/test_problem.py
|
c3333/sphereengine-languages
|
ef76cbffe67407d88519ba1e4bfaa20e3a55ccff
|
[
"Apache-2.0"
] | 5 |
2019-05-05T15:47:24.000Z
|
2021-07-22T14:29:13.000Z
|
languages/116/examples/test_problem.py
|
c3333/sphereengine-languages
|
ef76cbffe67407d88519ba1e4bfaa20e3a55ccff
|
[
"Apache-2.0"
] | 1 |
2022-03-29T14:20:04.000Z
|
2022-03-29T14:20:04.000Z
|
languages/116/examples/test_problem.py
|
c3333/sphereengine-languages
|
ef76cbffe67407d88519ba1e4bfaa20e3a55ccff
|
[
"Apache-2.0"
] | 4 |
2020-02-25T14:30:43.000Z
|
2021-05-12T10:05:05.000Z
|
from sys import stdin
for line in stdin:
n = int(line)
if n == 42:
break
print(n)
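# Example run (illustrative input): feeding the lines 1, 2, 42, 5 on stdin
# prints 1 and 2, then stops at 42 without printing it or anything after it.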
| 9.888889 | 21 | 0.629213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e9480334f3e96fb87240d084ea753201b541d895
| 367 |
py
|
Python
|
Python/Effective Python/item19.py
|
Vayne-Lover/Effective
|
05f0a08bec8eb112fdb4e7a489d0e33bc81522ff
|
[
"MIT"
] | null | null | null |
Python/Effective Python/item19.py
|
Vayne-Lover/Effective
|
05f0a08bec8eb112fdb4e7a489d0e33bc81522ff
|
[
"MIT"
] | null | null | null |
Python/Effective Python/item19.py
|
Vayne-Lover/Effective
|
05f0a08bec8eb112fdb4e7a489d0e33bc81522ff
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
def remainder(number, divisor):
    return number % divisor
def flow_rate(weight, time, period=1):
    return weight / time * period
if __name__ == "__main__":
    print(remainder(20, 7))
    print(remainder(20, divisor=7))
    print(remainder(number=20, divisor=7))
    print(remainder(divisor=7, number=20))
    print(flow_rate(0.5, 3))
    print(flow_rate(6, 3, 100))
| 21.588235 | 39 | 0.708447 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.089918 |
e94dc72d516776aab0f1e035f052d60121476db1
| 1,981 |
py
|
Python
|
create_h5ad.py
|
xmuyulab/DAISM-XMBD
|
916e18a1f111789a1c0bd3c1209d5a73813f3d3a
|
[
"MIT"
] | 2 |
2021-11-05T00:43:16.000Z
|
2021-12-14T08:39:29.000Z
|
create_h5ad.py
|
biosyy/DAISM-XMBD
|
a76f976db8c33ef33f78533a5a2be50a85148e79
|
[
"MIT"
] | 2 |
2021-01-14T19:40:46.000Z
|
2021-01-14T19:41:14.000Z
|
create_h5ad.py
|
biosyy/DAISM-XMBD
|
a76f976db8c33ef33f78533a5a2be50a85148e79
|
[
"MIT"
] | 1 |
2021-08-30T15:11:45.000Z
|
2021-08-30T15:11:45.000Z
|
###############################
## create purified h5ad file ##
###############################
# input: annotation table and the whole expression profile
# output: purified h5ad file
import os
import pandas as pd
import anndata
import argparse
import gc
import numpy as np
parser = argparse.ArgumentParser(description='create purified h5ad file for DAISM-XMBD')
parser.add_argument("-anno", type=str, help="annotation table (contains 'sample.name' and 'cell.type' two columns)", default=None)
parser.add_argument("-exp", type=str, help="the whole expression profile (sample.name in column and gene symbol in row)", default=None)
parser.add_argument("-outdir", type=str, help="the directory to store h5ad file", default="example/")
parser.add_argument("-prefix",type=str,help="the prefix of h5ad file",default= "purified")
def main():
inputArgs = parser.parse_args()
    if not os.path.exists(inputArgs.outdir):
os.mkdir(inputArgs.outdir)
anno_table = pd.read_csv(inputArgs.anno)
cell_list = list(anno_table['cell.type'].unique())
exp = pd.read_csv(inputArgs.exp,sep="\t",index_col=0)
adata = []
for cell in cell_list:
tmp = anno_table[anno_table['cell.type']==cell]
sample_list = tmp['sample.name']
sample_list_inter = list(set(sample_list).intersection(list(exp.columns)))
exp_select=exp[sample_list_inter]
anno = pd.DataFrame(np.repeat(cell,exp_select.shape[1]),columns=['cell.type'])
adata.append(anndata.AnnData(X=exp_select.T.values,
obs=anno,
var=pd.DataFrame(columns=[],index=list(exp_select.index))))
    # Fold the per-cell-type AnnData objects into adata[0] one at a time,
    # freeing each merged object to keep peak memory low.
    for i in range(1, len(adata)):
print("Concatenating " + str(i))
adata[0] = adata[0].concatenate(adata[1])
del adata[1]
gc.collect()
print(len(adata))
adata = adata[0]
adata.write(inputArgs.outdir+'/'+inputArgs.prefix+'.h5ad')
if __name__ == "__main__":
main()
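# --- Example invocation (hypothetical file names), run from a shell:
#   python create_h5ad.py -anno annotation.csv -exp expression.txt -outdir example/ -prefix purified
# annotation.csv must contain 'sample.name' and 'cell.type' columns, and
# expression.txt is tab-separated with genes as rows and samples as columns.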
| 34.155172 | 135 | 0.649167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 561 | 0.28319 |
e94e1af31de28cb3ee32e1feeddbef4991bf43d4
| 1,424 |
py
|
Python
|
FM_Tuning.py
|
RomanGutin/GEMSEC
|
cb2c26d4747cbd3d4c048787ca41665ef0e64155
|
[
"MIT"
] | null | null | null |
FM_Tuning.py
|
RomanGutin/GEMSEC
|
cb2c26d4747cbd3d4c048787ca41665ef0e64155
|
[
"MIT"
] | null | null | null |
FM_Tuning.py
|
RomanGutin/GEMSEC
|
cb2c26d4747cbd3d4c048787ca41665ef0e64155
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 13:56:44 2018
@author: RomanGutin
"""
import pandas as pd
import numpy as np
# Frequency tuning loop
amino_letter = ['A','R','D','N','C','E','Q','G','H','I','L','K','M','F','P','S','T','W','Y','V']
length_scores = [4,8,6,6,5,7,7,4,7,5,6,8,7,8,5,5,5,9,8,5]
# NOTE: `just_let` is not defined in this script; it is assumed to be a
# DataFrame of peptide sequences created earlier in the session.
FM_df = pd.DataFrame(0, index=just_let.index, columns=range(0, 81))
FM_score_dict = dict(zip(amino_letter, length_scores))
# Split each amino letter into new independent variables based on its length score
fm_letter_dict = {}
for letter in amino_letter:
new_vars =[]
for i in range(FM_score_dict[letter]):
new_vars.append(letter+str(i+1))
fm_letter_dict[letter]=new_vars
#generate new FM_tuned dataframe
for seq in FM_df.index:
letter_list= list(seq)
for letter in letter_list:
for var in fm_letter_dict[letter]:
row= FM_df.loc[seq,:]
spot= row[row==0].index[0]
FM_df.loc[seq,spot]= var
FM_df = pd.read_csv('Frequency Tuned Dataset')  # data after frequency tuning
FM_df.set_index('sequence', inplace=True)
FM_df_arr = np.array(FM_df.values, dtype=[('O', float)]).astype(float)
# New letter-to-weight dict holding the FM-tuned variables.
# NOTE: `ltw_AM_n` is not defined in this script; it is assumed to be a
# letter-to-weight dict fitted in an earlier MLE step.
ltw_fm_MLE = {}
for amino in amino_letter:
    for var in fm_letter_dict[amino]:
        ltw_fm_MLE[var] = ltw_AM_n[amino]
ltw_fm_MLE = np.load('ltw_fm_MLE.npy').item()  # reloads a saved copy, replacing the dict built above
| 30.297872 | 96 | 0.656601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 432 | 0.303371 |
e94e9483c973c25abe2c71d5816ab7d9b774441e
| 692 |
py
|
Python
|
unified_api/brokers/kafka/consumer.py
|
campos537/deep-fashion-system
|
1de31dd6260cc967e1832cff63ae7e537a3a4e9d
|
[
"Unlicense"
] | 1 |
2021-04-06T00:43:26.000Z
|
2021-04-06T00:43:26.000Z
|
unified_api/brokers/kafka/consumer.py
|
campos537/deep-fashion-system
|
1de31dd6260cc967e1832cff63ae7e537a3a4e9d
|
[
"Unlicense"
] | null | null | null |
unified_api/brokers/kafka/consumer.py
|
campos537/deep-fashion-system
|
1de31dd6260cc967e1832cff63ae7e537a3a4e9d
|
[
"Unlicense"
] | null | null | null |
from kafka import KafkaConsumer
class Consumer:
def __init__(self, config):
        bootstrap_server = config.get("bootstrap_server") + ":" + config.get("port")
        self.consumer = KafkaConsumer(
            config.get("subscription_id_2"),
            bootstrap_servers=bootstrap_server,
            api_version=(0, 10),
            auto_offset_reset='earliest',
            enable_auto_commit=True,
            group_id="test",
        )
self.messages = []
def get_message(self):
if len(self.messages) > 0:
mes = self.messages.pop(0)
return mes
def listen(self):
for message in self.consumer:
self.messages.append(message.value)
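# --- Usage sketch (not part of the original module): assumes a Kafka broker
# reachable at localhost:9092 and a topic name under 'subscription_id_2'.
# listen() blocks, so in practice it runs in a background thread while the
# buffered values are drained with get_message().
if __name__ == '__main__':
    config = {'bootstrap_server': 'localhost', 'port': '9092',
              'subscription_id_2': 'test-topic'}
    Consumer(config).listen()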
| 34.6 | 109 | 0.601156 | 658 | 0.950867 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.089595 |
e94ef8f2fd09f77bca0e59bab465fb16e55c0ca1
| 2,159 |
py
|
Python
|
utils.py
|
mino2401200231/File-convertor
|
6fb438dc5f37bf0efd78e18e4848b4cdb0331343
|
[
"MIT"
] | null | null | null |
utils.py
|
mino2401200231/File-convertor
|
6fb438dc5f37bf0efd78e18e4848b4cdb0331343
|
[
"MIT"
] | null | null | null |
utils.py
|
mino2401200231/File-convertor
|
6fb438dc5f37bf0efd78e18e4848b4cdb0331343
|
[
"MIT"
] | 2 |
2021-08-12T06:37:52.000Z
|
2021-09-05T13:03:36.000Z
|
# utilities
import os
import uuid
import subprocess
# Image To Pdf
import img2pdf
# PDF To Images
from pdf2image import convert_from_path
# PDF To Word
from pdf2docx import parse
_BASE_DIR = os.getcwd()
_BASE_DIR_FILE = os.path.join(_BASE_DIR, "files")
def process_image_to_pdf(files, pdf_name):
img = []
with open(f"{_BASE_DIR_FILE}/{pdf_name}.pdf","wb") as fil:
for fname in files:
path = os.path.join(_BASE_DIR_FILE, fname)
img.append(path)
fil.write(img2pdf.convert(img))
return pdf_name
def process_word_to_pdf(file):
file_address = os.path.join(_BASE_DIR_FILE, file)
command = ['lowriter' ,'--convert-to','pdf' , file_address , "--outdir", _BASE_DIR_FILE]
command_run = subprocess.run(command)
file_name = -1
if command_run.returncode == 0:
file_name = ".".join(file.split(".")[:-1]) + ".pdf"
return file_name
def process_pdf_to_images(file):
file_address = os.path.join(_BASE_DIR_FILE, file)
folder_name = str(uuid.uuid1())
folder_address = os.path.join(_BASE_DIR_FILE, folder_name)
os.mkdir(folder_address)
    try:
        # jpegopt expects a dict of JPEG options; 95 is an assumed quality value.
        convert_from_path(file_address, output_folder=folder_address, fmt="jpeg",
                          thread_count=10, jpegopt={"quality": 95})
        return folder_address
    except Exception:
        import shutil
        shutil.rmtree(folder_address)
        return -1
def process_pdf_to_word(file):
file_address = os.path.join(_BASE_DIR_FILE, file)
word_file = str(uuid.uuid1()) + ".docx"
word_file_address = os.path.join(_BASE_DIR_FILE, word_file)
    try:
        parse(file_address, word_file_address, multi_processing=True)
        return word_file_address
    except Exception:
        return -1
def del_user_files(files):
    for file in files:
        file_address = os.path.join(_BASE_DIR_FILE, file)
        try:
            os.remove(file_address)
        except OSError:
            pass
def del_one_file(file):
    try:
        os.remove(file)
    except OSError:
        try:
            file_address = os.path.join(_BASE_DIR_FILE, file)
            os.remove(file_address)
        except OSError:
            pass
    return 1
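# --- Usage sketch (hypothetical file names): the helpers expect their inputs
# to already live inside the files/ directory (_BASE_DIR_FILE).
if __name__ == '__main__':
    pdf_name = process_image_to_pdf(['page1.jpg', 'page2.jpg'], 'merged')
    print('wrote', pdf_name + '.pdf')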
| 26.329268 | 117 | 0.656322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.079203 |
e950fb1913401e7e3634e1210cfe24f9fddcf950
| 2,026 |
py
|
Python
|
screens/tasks/tasks.py
|
athrn/kognitivo
|
15822338778213c09ea654ec4e06a300129f9478
|
[
"Apache-2.0"
] | 80 |
2017-11-13T21:58:55.000Z
|
2022-01-03T20:10:42.000Z
|
screens/tasks/tasks.py
|
athrn/kognitivo
|
15822338778213c09ea654ec4e06a300129f9478
|
[
"Apache-2.0"
] | null | null | null |
screens/tasks/tasks.py
|
athrn/kognitivo
|
15822338778213c09ea654ec4e06a300129f9478
|
[
"Apache-2.0"
] | 21 |
2017-11-14T09:47:41.000Z
|
2021-11-23T06:44:31.000Z
|
from kivy.uix.screenmanager import Screen
from kivy.properties import StringProperty, ObjectProperty, NumericProperty, ListProperty, BooleanProperty
from kivy.app import App
from kivy.logger import Logger
from library_widgets import TrackingScreenMixin
from utils import import_kv
import_kv(__file__)
class TasksScreen(TrackingScreenMixin, Screen):
family = StringProperty(None, allownone=True)
played_times = NumericProperty()
tasks = ListProperty()
_main_manager = ObjectProperty()
loading = ObjectProperty()
quick_test = BooleanProperty(False)
def on_quick_test(self, *args):
if self._main_manager:
self.update_content()
@property
def main_manager(self):
if not self._main_manager:
from .content import TaskScreenManager
self._main_manager = TaskScreenManager()
return self._main_manager
def update_content(self, *args, **kwargs):
if self.quick_test:
self.main_manager.start_test(self.family, self.tasks)
self.main_manager.current = 'test'
else:
self.main_manager.task_sets_screen.fill()
self.main_manager.current = 'task_sets'
app = App.get_running_app()
sessions_starts = app.storage['sessions']['started']
app.tracker.send_event('tasks', 'sessions', label='started', value=sessions_starts + 1)
app.storage['sessions'] = {"started": sessions_starts + 1,
"finished": app.storage['sessions']['finished']}
self.played_times += 1
Logger.info("Tasks: playing %s times" % self.played_times)
if self.played_times == 10:
App.get_running_app().google_client.unlock_achievement("addicted")
if self.main_manager.parent != self:
self.loading.hide(self._main_manager)
def on_enter(self, *args):
super(TasksScreen, self).on_enter(*args)
app = App.get_running_app()
app.initialize_billing(self.update_content)
| 35.54386 | 106 | 0.673248 | 1,719 | 0.84847 | 0 | 0 | 211 | 0.104146 | 0 | 0 | 146 | 0.072063 |
e954754c8db1dbc45662c97eec7de33aed7d3e19
| 1,240 |
py
|
Python
|
imclassify/train_model.py
|
AdamSpannbauer/imclassify
|
27c24576ef6a2ed344cad7f568f7e4cdfe6ea0bd
|
[
"MIT"
] | null | null | null |
imclassify/train_model.py
|
AdamSpannbauer/imclassify
|
27c24576ef6a2ed344cad7f568f7e4cdfe6ea0bd
|
[
"MIT"
] | null | null | null |
imclassify/train_model.py
|
AdamSpannbauer/imclassify
|
27c24576ef6a2ed344cad7f568f7e4cdfe6ea0bd
|
[
"MIT"
] | null | null | null |
"""Train logistic regression model on hdf5 features for classification
Modified from:
https://gurus.pyimagesearch.com/topic/transfer-learning-example-dogs-and-cats/
"""
import pickle
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
def train_model(h5py_db, model_output='model.pickle', percent_train=1.0):
"""Train logistic regression classifier
:param h5py_db: path to HDF5 database containing 'features', 'labels', & 'label_names'
:param model_output: path to save trained model to using pickle
:param percent_train: percent of images to be used for training (instead of testing)
:return: None; output is written to `model_output`
"""
i = int(h5py_db['labels'].shape[0] * percent_train)
# C decided with sklearn.model_selection.GridSearchCV
model = LogisticRegression(C=0.1)
model.fit(h5py_db['features'][:i], h5py_db['labels'][:i])
if percent_train < 1.0:
preds = model.predict(h5py_db['features'][i:])
print(classification_report(h5py_db['labels'][i:], preds,
target_names=h5py_db['label_names']))
with open(model_output, 'wb') as f:
f.write(pickle.dumps(model))
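# --- Usage sketch (hypothetical database path): assumes an HDF5 file produced
# by a separate feature-extraction step with 'features', 'labels' and
# 'label_names' datasets.
if __name__ == '__main__':
    import h5py
    with h5py.File('features.hdf5', 'r') as db:
        train_model(db, model_output='model.pickle', percent_train=0.75)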
| 37.575758 | 90 | 0.704839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 652 | 0.525806 |
e955b53af943d2f078f97e589977586caea5ae03
| 1,760 |
py
|
Python
|
Test/final/V5_baseline_CC_ref/aggregate.py
|
WangWenhao0716/ISC-Track1-Submission
|
3484142c0550262c90fc229e5e0ba719c58c592d
|
[
"MIT"
] | 46 |
2021-10-31T08:02:51.000Z
|
2022-03-11T08:42:30.000Z
|
Test/final/V5_baseline_CC_ref/aggregate.py
|
WangWenhao0716/ISC-Track1-Submission
|
3484142c0550262c90fc229e5e0ba719c58c592d
|
[
"MIT"
] | 3 |
2021-11-18T09:35:45.000Z
|
2022-03-31T01:20:34.000Z
|
Test/final/V5_baseline_CC_ref/aggregate.py
|
WangWenhao0716/ISC-Track1-Submission
|
3484142c0550262c90fc229e5e0ba719c58c592d
|
[
"MIT"
] | 8 |
2021-12-01T08:02:08.000Z
|
2022-02-26T13:29:36.000Z
|
# Intersect the match pairs predicted by three models (dirs 50/, ibn/, 152/)
# and keep the union of their scored rows for the pairs all three agree on.
import pandas as pd
v_4 = pd.read_csv('50/predictions_dev_queries_50k_normalized_exp.csv')
temp = list(v_4['query_id'])
v_4['query_id'] = list(v_4['reference_id'])
v_4['reference_id'] = temp
v_5 = pd.read_csv('ibn/predictions_dev_queries_50k_normalized_exp.csv')
temp = list(v_5['query_id'])
v_5['query_id'] = list(v_5['reference_id'])
v_5['reference_id'] = temp
v_6 = pd.read_csv('152/predictions_dev_queries_50k_normalized_exp.csv')
temp = list(v_6['query_id'])
v_6['query_id'] = list(v_6['reference_id'])
v_6['reference_id'] = temp
v_4_query = list(v_4['query_id'])
v_4_reference = list(v_4['reference_id'])
v_4_com = []
for i in range(len(v_4)):
v_4_com.append((v_4_query[i],v_4_reference[i]))
v_5_query = list(v_5['query_id'])
v_5_reference = list(v_5['reference_id'])
v_5_com = []
for i in range(len(v_5)):
v_5_com.append((v_5_query[i],v_5_reference[i]))
v_6_query = list(v_6['query_id'])
v_6_reference = list(v_6['reference_id'])
v_6_com = []
for i in range(len(v_6)):
v_6_com.append((v_6_query[i],v_6_reference[i]))
inter_45 = list(set(v_4_com).intersection(set(v_5_com)))
inter_46 = list(set(v_4_com).intersection(set(v_6_com)))
inter_456 = list(set(inter_45).intersection(set(inter_46)))
new_456 = pd.DataFrame()
q = []
for i in range(len(inter_456)):
q.append(inter_456[i][0])
r = []
for i in range(len(inter_456)):
r.append(inter_456[i][1])
new_456['query_id'] = q
new_456['reference_id'] = r
df_2 = pd.merge(new_456, v_4, on=['query_id','reference_id'], how='inner')
df_3 = pd.merge(new_456, v_5, on=['query_id','reference_id'], how='inner')
df_4 = pd.merge(new_456, v_6, on=['query_id','reference_id'], how='inner')
fast_456 = pd.concat((df_2,df_3,df_4))
fast_456.to_csv('R-baseline-CC-234-50k.csv',index=False)
| 31.428571 | 74 | 0.710795 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 515 | 0.292614 |
e95640499c478bef869502f2fe8e6dcadc430eb2
| 399 |
py
|
Python
|
src/commands/i_stat/anticheat.py
|
slimsevernake/osbb-bot
|
3a6b9512523a5374034c2f1cdb83ea5cd6de0ac8
|
[
"MIT"
] | 9 |
2018-08-19T12:55:58.000Z
|
2021-07-17T15:38:40.000Z
|
src/commands/i_stat/anticheat.py
|
slimsevernake/osbb-bot
|
3a6b9512523a5374034c2f1cdb83ea5cd6de0ac8
|
[
"MIT"
] | 124 |
2018-07-31T13:43:58.000Z
|
2022-03-11T23:27:43.000Z
|
src/commands/i_stat/anticheat.py
|
slimsevernake/osbb-bot
|
3a6b9512523a5374034c2f1cdb83ea5cd6de0ac8
|
[
"MIT"
] | 3 |
2019-10-21T13:18:14.000Z
|
2021-02-09T11:05:10.000Z
|
from src.utils.cache import cache
def cheats_key(chat_id: int, user_id: int) -> str:
return f'i_stat:cheats:{chat_id}:{user_id}'
def cheats_found(chat_id: int, user_id: int, sum_count: int) -> bool:
key = cheats_key(chat_id, user_id)
sums = cache.get(key, 0)
sums += sum_count
if sums > 50:
return True
cache.set(key, sums, time=10 * 60) # 10m
return False
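# --- Behaviour sketch (illustrative numbers): sums accumulate per (chat, user)
# in the cache until the running total exceeds 50.
#   cheats_found(1, 2, sum_count=30)  -> False  (running total 30, cached)
#   cheats_found(1, 2, sum_count=30)  -> True   (30 + 30 = 60 > 50; cache not updated)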
| 23.470588 | 69 | 0.649123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.102757 |
e9569e3a4e8763ed40f2c7965c464907cae6ec57
| 744 |
py
|
Python
|
tutorial/flask-api-mongo/app/services/mail_service.py
|
carrenolg/python
|
7c1f0013d911177ce3bc2c5ea58b8e6e562b7282
|
[
"Apache-2.0"
] | null | null | null |
tutorial/flask-api-mongo/app/services/mail_service.py
|
carrenolg/python
|
7c1f0013d911177ce3bc2c5ea58b8e6e562b7282
|
[
"Apache-2.0"
] | null | null | null |
tutorial/flask-api-mongo/app/services/mail_service.py
|
carrenolg/python
|
7c1f0013d911177ce3bc2c5ea58b8e6e562b7282
|
[
"Apache-2.0"
] | null | null | null |
from threading import Thread
from flask_mail import Mail, Message
from resources.errors import InternalServerError
mail = Mail(app=None)
app = None
def initialize_mail_service(application):
    global mail
    global app
    mail = Mail(app=application)
    app = application
def send_async_email(app, msg, mail):
with app.app_context():
try:
mail.send(msg)
except ConnectionRefusedError:
raise InternalServerError("[MAIL SERVER] not working")
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg, mail)).start()
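# --- Usage sketch (hypothetical app and addresses): wire the service to a
# Flask app once at startup, then send mail without blocking the request.
#   from flask import Flask
#   flask_app = Flask(__name__)
#   initialize_mail_service(flask_app)
#   send_email('Hello', '[email protected]', ['[email protected]'],
#              'plain text body', '<p>html body</p>')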
| 25.655172 | 66 | 0.711022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.03629 |
e9570255d9896891bde513fb7630bb22b041b8d0
| 18,541 |
py
|
Python
|
vxsandbox/resources/tests/test_http.py
|
praekeltfoundation/vumi-sandbox
|
1e2dfca8325ce98e52fe32a072749fe4cf7f448d
|
[
"BSD-3-Clause"
] | 1 |
2021-05-26T08:38:28.000Z
|
2021-05-26T08:38:28.000Z
|
vxsandbox/resources/tests/test_http.py
|
praekelt/vumi-sandbox
|
1e2dfca8325ce98e52fe32a072749fe4cf7f448d
|
[
"BSD-3-Clause"
] | 24 |
2015-03-04T08:33:12.000Z
|
2016-08-18T07:57:12.000Z
|
vxsandbox/resources/tests/test_http.py
|
praekeltfoundation/vumi-sandbox
|
1e2dfca8325ce98e52fe32a072749fe4cf7f448d
|
[
"BSD-3-Clause"
] | null | null | null |
import base64
import json
from OpenSSL.SSL import (
VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_NONE,
SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD)
from twisted.web.http_headers import Headers
from twisted.internet.defer import inlineCallbacks, fail, succeed
from vxsandbox.resources.http import (
HttpClientContextFactory, HttpClientPolicyForHTTPS, make_context_factory,
HttpClientResource)
from vxsandbox.resources.tests.utils import ResourceTestCaseBase
class DummyResponse(object):
def __init__(self):
self.headers = Headers({})
class DummyHTTPClient(object):
def __init__(self):
self._next_http_request_result = None
self.http_requests = []
def set_agent(self, agent):
self.agent = agent
def get_context_factory(self):
# We need to dig around inside our Agent to find the context factory.
# Since this involves private attributes that have changed a few times
# recently, we need to try various options.
if hasattr(self.agent, "_contextFactory"):
# For Twisted 13.x
return self.agent._contextFactory
elif hasattr(self.agent, "_policyForHTTPS"):
# For Twisted 14.x
return self.agent._policyForHTTPS
elif hasattr(self.agent, "_endpointFactory"):
# For Twisted 15.0.0 (and possibly newer)
return self.agent._endpointFactory._policyForHTTPS
else:
raise NotImplementedError(
"I can't find the context factory on this Agent. This seems"
" to change every few versions of Twisted.")
def fail_next(self, error):
self._next_http_request_result = fail(error)
def succeed_next(self, body, code=200, headers={}):
default_headers = {
'Content-Length': str(len(body)),
}
default_headers.update(headers)
response = DummyResponse()
response.code = code
for header, value in default_headers.items():
response.headers.addRawHeader(header, value)
response.content = lambda: succeed(body)
self._next_http_request_result = succeed(response)
def request(self, *args, **kw):
self.http_requests.append((args, kw))
return self._next_http_request_result
class TestHttpClientResource(ResourceTestCaseBase):
resource_cls = HttpClientResource
@inlineCallbacks
def setUp(self):
super(TestHttpClientResource, self).setUp()
yield self.create_resource({})
self.dummy_client = DummyHTTPClient()
self.patch(self.resource_cls,
'http_client_class', self.get_dummy_client)
def get_dummy_client(self, agent):
self.dummy_client.set_agent(agent)
return self.dummy_client
def http_request_fail(self, error):
self.dummy_client.fail_next(error)
def http_request_succeed(self, body, code=200, headers={}):
self.dummy_client.succeed_next(body, code, headers)
def assert_not_unicode(self, arg):
self.assertFalse(isinstance(arg, unicode))
def get_context_factory(self):
return self.dummy_client.get_context_factory()
def get_context(self, context_factory=None):
if context_factory is None:
context_factory = self.get_context_factory()
if hasattr(context_factory, 'creatorForNetloc'):
# This context_factory is a new-style IPolicyForHTTPS
# implementation, so we need to get a context from through its
# client connection creator. The creator could either be a wrapper
# around a ClientContextFactory (in which case we treat it like
# one) or a ClientTLSOptions object (which means we have to grab
# the context from a private attribute).
creator = context_factory.creatorForNetloc('example.com', 80)
if hasattr(creator, 'getContext'):
return creator.getContext()
else:
return creator._ctx
else:
# This context_factory is an old-style WebClientContextFactory and
# will build us a context object if we ask nicely.
return context_factory.getContext('example.com', 80)
def assert_http_request(self, url, method='GET', headers=None, data=None,
timeout=None, files=None):
timeout = (timeout if timeout is not None
else self.resource.timeout)
args = (method, url,)
kw = dict(headers=headers, data=data,
timeout=timeout, files=files)
[(actual_args, actual_kw)] = self.dummy_client.http_requests
# NOTE: Files are handed over to treq as file pointer-ish things
# which in our case are `StringIO` instances.
actual_kw_files = actual_kw.get('files')
if actual_kw_files is not None:
actual_kw_files = actual_kw.pop('files', None)
kw_files = kw.pop('files', {})
for name, file_data in actual_kw_files.items():
kw_file_data = kw_files[name]
file_name, content_type, sio = file_data
self.assertEqual(
(file_name, content_type, sio.getvalue()),
kw_file_data)
self.assertEqual((actual_args, actual_kw), (args, kw))
self.assert_not_unicode(actual_args[0])
self.assert_not_unicode(actual_kw.get('data'))
headers = actual_kw.get('headers')
if headers is not None:
for key, values in headers.items():
self.assert_not_unicode(key)
for value in values:
self.assert_not_unicode(value)
def test_make_context_factory_no_method_verify_none(self):
context_factory = make_context_factory(verify_options=VERIFY_NONE)
self.assertIsInstance(context_factory, HttpClientContextFactory)
self.assertEqual(context_factory.verify_options, VERIFY_NONE)
self.assertEqual(context_factory.ssl_method, None)
self.assertEqual(
self.get_context(context_factory).get_verify_mode(), VERIFY_NONE)
def test_make_context_factory_no_method_verify_peer(self):
# This test's behaviour depends on the version of Twisted being used.
context_factory = make_context_factory(verify_options=VERIFY_PEER)
context = self.get_context(context_factory)
self.assertEqual(context_factory.ssl_method, None)
self.assertNotEqual(context.get_verify_mode(), VERIFY_NONE)
if HttpClientPolicyForHTTPS is None:
# We have Twisted<14.0.0
self.assertIsInstance(context_factory, HttpClientContextFactory)
self.assertEqual(context_factory.verify_options, VERIFY_PEER)
self.assertEqual(context.get_verify_mode(), VERIFY_PEER)
else:
self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)
def test_make_context_factory_no_method_verify_peer_or_fail(self):
# This test's behaviour depends on the version of Twisted being used.
context_factory = make_context_factory(
verify_options=(VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT))
context = self.get_context(context_factory)
self.assertEqual(context_factory.ssl_method, None)
self.assertNotEqual(context.get_verify_mode(), VERIFY_NONE)
if HttpClientPolicyForHTTPS is None:
# We have Twisted<14.0.0
self.assertIsInstance(context_factory, HttpClientContextFactory)
self.assertEqual(
context_factory.verify_options,
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT)
self.assertEqual(
context.get_verify_mode(),
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT)
else:
self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)
def test_make_context_factory_no_method_no_verify(self):
# This test's behaviour depends on the version of Twisted being used.
context_factory = make_context_factory()
self.assertEqual(context_factory.ssl_method, None)
if HttpClientPolicyForHTTPS is None:
# We have Twisted<14.0.0
self.assertIsInstance(context_factory, HttpClientContextFactory)
self.assertEqual(context_factory.verify_options, None)
else:
self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)
def test_make_context_factory_sslv3_no_verify(self):
# This test's behaviour depends on the version of Twisted being used.
context_factory = make_context_factory(ssl_method=SSLv3_METHOD)
self.assertEqual(context_factory.ssl_method, SSLv3_METHOD)
if HttpClientPolicyForHTTPS is None:
# We have Twisted<14.0.0
self.assertIsInstance(context_factory, HttpClientContextFactory)
self.assertEqual(context_factory.verify_options, None)
else:
self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)
@inlineCallbacks
def test_handle_get(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command('get',
url='http://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('http://www.example.com', method='GET')
@inlineCallbacks
def test_handle_post(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command('post',
url='http://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('http://www.example.com', method='POST')
@inlineCallbacks
def test_handle_patch(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command('patch',
url='http://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('http://www.example.com', method='PATCH')
@inlineCallbacks
def test_handle_head(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command('head',
url='http://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('http://www.example.com', method='HEAD')
@inlineCallbacks
def test_handle_delete(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command('delete',
url='http://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('http://www.example.com', method='DELETE')
@inlineCallbacks
def test_handle_put(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command('put',
url='http://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('http://www.example.com', method='PUT')
@inlineCallbacks
def test_failed_get(self):
self.http_request_fail(ValueError("HTTP request failed"))
reply = yield self.dispatch_command('get',
url='http://www.example.com')
self.assertFalse(reply['success'])
self.assertEqual(reply['reason'], "HTTP request failed")
self.assert_http_request('http://www.example.com', method='GET')
@inlineCallbacks
def test_null_url(self):
reply = yield self.dispatch_command('get')
self.assertFalse(reply['success'])
self.assertEqual(reply['reason'], "No URL given")
@inlineCallbacks
def test_https_request(self):
# This test's behaviour depends on the version of Twisted being used.
self.http_request_succeed("foo")
reply = yield self.dispatch_command('get',
url='https://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('https://www.example.com', method='GET')
context_factory = self.get_context_factory()
self.assertEqual(context_factory.ssl_method, None)
if HttpClientPolicyForHTTPS is None:
self.assertIsInstance(context_factory, HttpClientContextFactory)
self.assertEqual(context_factory.verify_options, None)
else:
self.assertIsInstance(context_factory, HttpClientPolicyForHTTPS)
@inlineCallbacks
def test_https_request_verify_none(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command(
'get', url='https://www.example.com',
verify_options=['VERIFY_NONE'])
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('https://www.example.com', method='GET')
context = self.get_context()
self.assertEqual(context.get_verify_mode(), VERIFY_NONE)
@inlineCallbacks
def test_https_request_verify_peer_or_fail(self):
# This test's behaviour depends on the version of Twisted being used.
self.http_request_succeed("foo")
reply = yield self.dispatch_command(
'get', url='https://www.example.com',
verify_options=['VERIFY_PEER', 'VERIFY_FAIL_IF_NO_PEER_CERT'])
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('https://www.example.com', method='GET')
context = self.get_context()
# We don't control verify mode in newer Twisted.
self.assertNotEqual(context.get_verify_mode(), VERIFY_NONE)
if HttpClientPolicyForHTTPS is None:
self.assertEqual(
context.get_verify_mode(),
VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT)
@inlineCallbacks
def test_handle_post_files(self):
self.http_request_succeed('')
reply = yield self.dispatch_command(
'post', url='https://www.example.com', files={
'foo': {
'file_name': 'foo.json',
'content_type': 'application/json',
'data': base64.b64encode(json.dumps({'foo': 'bar'})),
}
})
self.assertTrue(reply['success'])
self.assert_http_request(
'https://www.example.com', method='POST', files={
'foo': ('foo.json', 'application/json',
json.dumps({'foo': 'bar'})),
})
@inlineCallbacks
def test_data_limit_exceeded_using_head_method(self):
self.http_request_succeed('', headers={
'Content-Length': str(self.resource.DEFAULT_DATA_LIMIT + 1),
})
reply = yield self.dispatch_command(
'head', url='https://www.example.com',)
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "")
self.assert_http_request('https://www.example.com', method='HEAD')
@inlineCallbacks
def test_data_limit_exceeded_using_header(self):
self.http_request_succeed('', headers={
'Content-Length': str(self.resource.DEFAULT_DATA_LIMIT + 1),
})
reply = yield self.dispatch_command(
'get', url='https://www.example.com',)
self.assertFalse(reply['success'])
self.assertEqual(
reply['reason'],
'Received %d bytes, maximum of %s bytes allowed.' % (
self.resource.DEFAULT_DATA_LIMIT + 1,
self.resource.DEFAULT_DATA_LIMIT,))
@inlineCallbacks
def test_data_limit_exceeded_inferred_from_body(self):
self.http_request_succeed('1' * (self.resource.DEFAULT_DATA_LIMIT + 1))
reply = yield self.dispatch_command(
'get', url='https://www.example.com',)
self.assertFalse(reply['success'])
self.assertEqual(
reply['reason'],
'Received %d bytes, maximum of %s bytes allowed.' % (
self.resource.DEFAULT_DATA_LIMIT + 1,
self.resource.DEFAULT_DATA_LIMIT,))
@inlineCallbacks
def test_https_request_method_default(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command(
'get', url='https://www.example.com')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('https://www.example.com', method='GET')
context_factory = self.get_context_factory()
self.assertEqual(context_factory.ssl_method, None)
@inlineCallbacks
def test_https_request_method_SSLv3(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command(
'get', url='https://www.example.com', ssl_method='SSLv3')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('https://www.example.com', method='GET')
context_factory = self.get_context_factory()
self.assertEqual(context_factory.ssl_method, SSLv3_METHOD)
@inlineCallbacks
def test_https_request_method_SSLv23(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command(
'get', url='https://www.example.com', ssl_method='SSLv23')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('https://www.example.com', method='GET')
context_factory = self.get_context_factory()
self.assertEqual(context_factory.ssl_method, SSLv23_METHOD)
@inlineCallbacks
def test_https_request_method_TLSv1(self):
self.http_request_succeed("foo")
reply = yield self.dispatch_command(
'get', url='https://www.example.com', ssl_method='TLSv1')
self.assertTrue(reply['success'])
self.assertEqual(reply['body'], "foo")
self.assert_http_request('https://www.example.com', method='GET')
context_factory = self.get_context_factory()
self.assertEqual(context_factory.ssl_method, TLSv1_METHOD)
| 42.138636 | 79 | 0.644356 | 18,054 | 0.973734 | 9,198 | 0.49609 | 9,618 | 0.518742 | 0 | 0 | 3,520 | 0.18985 |
e9576153377cb8542e00446bc31a32f660d4a2a6
| 99 |
py
|
Python
|
examples/port_demo.py
|
smilelight/lightUtils
|
e9b7ed35ed50cf6b7c6284fe60918ce4dc71beac
|
[
"MIT"
] | 2 |
2020-01-23T02:03:19.000Z
|
2020-12-13T09:05:45.000Z
|
examples/port_demo.py
|
smilelight/lightUtils
|
e9b7ed35ed50cf6b7c6284fe60918ce4dc71beac
|
[
"MIT"
] | null | null | null |
examples/port_demo.py
|
smilelight/lightUtils
|
e9b7ed35ed50cf6b7c6284fe60918ce4dc71beac
|
[
"MIT"
] | null | null | null |
from lightutils import get_free_tcp_port
port = get_free_tcp_port()
print(port)
print(type(port))
| 16.5 | 40 | 0.808081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e95a4fa6b39694c0762d544398c6a91dc4eb000f
| 722 |
py
|
Python
|
soundDB/__init__.py
|
gjoseph92/soundDB2
|
4d9cc93cc596a5089233f17b0b8be252f73e1224
|
[
"CC0-1.0"
] | 3 |
2017-05-16T19:37:32.000Z
|
2020-03-29T21:54:33.000Z
|
soundDB/__init__.py
|
gjoseph92/soundDB2
|
4d9cc93cc596a5089233f17b0b8be252f73e1224
|
[
"CC0-1.0"
] | 19 |
2016-12-02T20:47:24.000Z
|
2021-10-05T19:01:01.000Z
|
soundDB/__init__.py
|
gjoseph92/soundDB2
|
4d9cc93cc596a5089233f17b0b8be252f73e1224
|
[
"CC0-1.0"
] | 2 |
2017-05-10T23:01:06.000Z
|
2019-12-27T19:49:29.000Z
|
from .accessor import Accessor
from . import parsers
import inspect
def populateAccessors():
"""
Find all filetype-specific Accessor subclasses in the parsers file (i.e. NVSPL, SRCID, etc.) and instantiate them.
This way, one instance of each Accessor is added to the soundDB namespace under the name of the Endpoint it uses.
"""
predicate = lambda obj: inspect.isclass(obj) and issubclass(obj, Accessor) and obj is not Accessor
specificAccessorSubclasses = inspect.getmembers(parsers, predicate)
accessors = { cls.endpointName: cls for name, cls in specificAccessorSubclasses }
return accessors
globals().update(populateAccessors())
del inspect, accessor, parsers, populateAccessors  # 'accessor' is bound here as a side effect of the submodule import above
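# Illustrative result: if parsers defines an Accessor subclass whose
# endpointName is 'nvspl', it becomes reachable as soundDB.nvspl. Note that
# the dict comprehension above stores the subclass itself, even though the
# docstring speaks of instances.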
| 34.380952 | 118 | 0.756233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.34349 |
e95c3c23ff20e2cb3d818ef3d5c5a11d27117013
| 3,953 |
py
|
Python
|
ipbb/models/ipbb.py
|
aagusti/i-pbb
|
8178f68744b440f96f2c3d114c2485d728655e24
|
[
"MIT"
] | null | null | null |
ipbb/models/ipbb.py
|
aagusti/i-pbb
|
8178f68744b440f96f2c3d114c2485d728655e24
|
[
"MIT"
] | null | null | null |
ipbb/models/ipbb.py
|
aagusti/i-pbb
|
8178f68744b440f96f2c3d114c2485d728655e24
|
[
"MIT"
] | null | null | null |
from sqlalchemy import (
    Column,
    Integer,
    SmallInteger,
    BigInteger,
    String,
    Date,
    ForeignKey,
    UniqueConstraint
)
from sqlalchemy.orm import relationship
from ..models import Base, DefaultModel, DBSession
class Propinsi(Base, DefaultModel):
__tablename__ = 'propinsis'
__table_args__ = {'extend_existing':True}
kode = Column(String(2), unique=True, nullable=False)
nama = Column(String(30), unique=True, nullable=False)
@classmethod
def get_deferred(cls):
return DBSession.query(cls.id, cls.nama).order_by(cls.kode).all()
class Dati2(Base, DefaultModel):
__tablename__ = 'dati2s'
__table_args__ = (UniqueConstraint('propinsi_id','kode', name="dati2_kode_key"),
{'extend_existing':True})
kode = Column(String(2), nullable=False)
nama = Column(String(30), unique=True, nullable=False)
propinsi_id = Column(Integer, ForeignKey('propinsis.id'))
propinsi = relationship("Propinsi", backref="dati2")
@classmethod
def get_by_kode(cls, propinsi_id, kode):
return cls.query().filter_by(propinsi_id=propinsi_id,kode=kode).first()
@classmethod
def get_deferred(cls):
return DBSession.query(cls.id, cls.nama).order_by(cls.kode).all()
class Registers(Base, DefaultModel):
__tablename__ = 'registers'
__table_args__ = {'extend_existing':True}
kode = Column(String(5), unique=True, nullable=False)
nama = Column(String(30), unique=True, nullable=False)
alamat_pemda = Column(String(128), unique=True, nullable=False)
nama_pic = Column(String(30), nullable=False)
nip_pic = Column(String(18), unique=True, nullable=False)
no_telpon = Column(String(18), unique=True, nullable=False)
no_hp = Column(String(18), unique=True, nullable=False)
tgl_register = Column(Date, nullable=False)
tgl_update = Column(Date, nullable=True)
tgl_valid = Column(Date, nullable=False)
status = Column(SmallInteger, nullable=False, default=0)
e_mail = Column(String(32), unique=True, nullable=False)
    jns_bayar = Column(SmallInteger, nullable=False)  # payment type: transfer / credit card
tagih_nama = Column(String(30), nullable=False)
tagih_alamat = Column(String(128), nullable=False)
password = Column(String(128), nullable=False)
periode_bayar = Column(SmallInteger, nullable=False)
rpc_url = Column(String(128), nullable=False)
rpc_userid = Column(String(128), nullable=False)
rpc_password = Column(String(128), unique=True, nullable=False)
propinsi_id = Column(Integer, ForeignKey('propinsis.id'))
propinsi = relationship("Propinsi", backref="register")
dati2_id = Column(Integer, ForeignKey('dati2s.id'))
dati2 = relationship("Dati2", backref="register")
class Invoices(Base, DefaultModel):
__tablename__ = 'invoices'
__table_args__ = {'extend_existing':True}
kode = Column(String(5), unique=True, nullable=False)
nama = Column(String(30), unique=True, nullable=False)
alamat = Column(String(128), unique=True, nullable=False)
register_id = Column(Integer, ForeignKey("registers.id"), nullable=False)
jumlah = Column(BigInteger, nullable = False)
tgl_invoice = Column(Date, nullable = False)
class Payments(Base, DefaultModel):
__tablename__ = 'payments'
__table_args__ = {'extend_existing':True}
invoice_id = Column(Integer, ForeignKey("invoices.id"), nullable=False)
jumlah = Column(BigInteger, nullable = False)
tgl_bayar = Column(Date, nullable = False)
jns_bayar = Column(SmallInteger, nullable = False)
posted = Column(SmallInteger, nullable=False, default=0)
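# --- Usage sketch (illustrative values): assumes DBSession is bound to an
# engine and the tables have been created.
def _example_seed_propinsi():
    prop = Propinsi(kode='01', nama='Example')  # hypothetical region
    DBSession.add(prop)
    DBSession.flush()
    return Propinsi.get_deferred()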
| 39.53 | 85 | 0.698963 | 3,422 | 0.865672 | 0 | 0 | 363 | 0.091829 | 0 | 0 | 312 | 0.078927 |
e95c5e6fc88c9d5b12bafc54c0d0afb1690c36cf
| 556 |
py
|
Python
|
tests/testLoadMapFromString.py
|
skowronskij/OGCServer
|
3fd11438180944ffa43e315c6390e89437a28f4e
|
[
"BSD-3-Clause"
] | 90 |
2015-04-30T22:13:14.000Z
|
2022-02-16T17:30:11.000Z
|
tests/testLoadMapFromString.py
|
skowronskij/OGCServer
|
3fd11438180944ffa43e315c6390e89437a28f4e
|
[
"BSD-3-Clause"
] | 6 |
2019-09-09T06:07:27.000Z
|
2020-06-17T09:52:49.000Z
|
tests/testLoadMapFromString.py
|
skowronskij/OGCServer
|
3fd11438180944ffa43e315c6390e89437a28f4e
|
[
"BSD-3-Clause"
] | 28 |
2015-05-12T09:08:17.000Z
|
2021-07-02T11:53:29.000Z
|
import nose
import os
from ogcserver.WMS import BaseWMSFactory
def test_wms_capabilities():
base_path, tail = os.path.split(__file__)
file_path = os.path.join(base_path, 'mapfile_encoding.xml')
wms = BaseWMSFactory()
with open(file_path) as f:
settings = f.read()
wms.loadXML(xmlstring=settings, basepath=base_path)
wms.finalize()
if len(wms.layers) != 1:
raise Exception('Incorrect number of layers')
if len(wms.styles) != 1:
raise Exception('Incorrect number of styles')
return True
| 27.8 | 63 | 0.676259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.140288 |
e95cb362167c296066d686777e92e50fed2083ee
| 977 |
py
|
Python
|
core/models/transaction.py
|
soslaio/openme
|
b6e8c87279363a62992b5db14646dbaa655dc936
|
[
"MIT"
] | null | null | null |
core/models/transaction.py
|
soslaio/openme
|
b6e8c87279363a62992b5db14646dbaa655dc936
|
[
"MIT"
] | null | null | null |
core/models/transaction.py
|
soslaio/openme
|
b6e8c87279363a62992b5db14646dbaa655dc936
|
[
"MIT"
] | null | null | null |
from django.db import models
from .base import Base
class Transaction(Base):
date = models.DateField()
description = models.CharField(max_length=400)
ammount = models.DecimalField(max_digits=10, decimal_places=2)
category = models.ForeignKey('Category', on_delete=models.CASCADE)
from_account = models.ForeignKey('Account', on_delete=models.CASCADE, related_name='from_transactions')
to_account = models.ForeignKey('Account', on_delete=models.CASCADE, null=True, blank=True,
related_name='to_transactions')
notes = models.TextField(null=True, blank=True)
consolidated = models.BooleanField(default=False)
@property
def is_transfer(self):
return self.from_account and self.to_account
@property
def type(self):
if self.is_transfer:
return 'transfer'
return 'credit' if self.ammount > 0 else 'debit'
def __str__(self):
return self.description
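# --- Usage sketch (illustrative values): a negative ammount on a single
# account reads as a debit; setting to_account turns it into a transfer.
#   tx = Transaction(date=date.today(), description='Groceries', ammount=-42,
#                    category=food, from_account=checking)
#   tx.type         -> 'debit'
#   tx.is_transfer  -> None (falsy: to_account is unset)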
| 33.689655 | 107 | 0.69089 | 921 | 0.942682 | 0 | 0 | 234 | 0.239509 | 0 | 0 | 89 | 0.091095 |
e95f809c079ce79cbabf21b0bd9fca926c8f6149
| 864 |
py
|
Python
|
setup.py
|
mikemalinowski/insomnia
|
ea637e5eba608eacd1731239f7ddf6bb91aacc9e
|
[
"MIT"
] | 2 |
2019-02-28T09:58:55.000Z
|
2020-03-06T05:03:34.000Z
|
setup.py
|
mikemalinowski/insomnia
|
ea637e5eba608eacd1731239f7ddf6bb91aacc9e
|
[
"MIT"
] | null | null | null |
setup.py
|
mikemalinowski/insomnia
|
ea637e5eba608eacd1731239f7ddf6bb91aacc9e
|
[
"MIT"
] | null | null | null |
import setuptools
try:
with open('README.md', 'r') as fh:
long_description = fh.read()
except OSError:
long_description = ''
setuptools.setup(
name='blackout',
version='1.0.4',
author='Mike Malinowski',
author_email='[email protected]',
description='A python package making it easy to drop a multi-module package from sys.modules',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/mikemalinowski/blackout',
packages=setuptools.find_packages(),
entry_points="""
[console_scripts]
blackout = blackout:blackout
""",
py_modules=["blackout"],
classifiers=[
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
| 28.8 | 99 | 0.635417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 405 | 0.46875 |
e96093e48bfaf833c59e3c55fbafb9b3d90f3407
| 710 |
py
|
Python
|
src/hypermd/html/html.py
|
Riib11/HyperMD
|
d6921b701635356236b00d0a8794ab68d733ad59
|
[
"MIT"
] | null | null | null |
src/hypermd/html/html.py
|
Riib11/HyperMD
|
d6921b701635356236b00d0a8794ab68d733ad59
|
[
"MIT"
] | null | null | null |
src/hypermd/html/html.py
|
Riib11/HyperMD
|
d6921b701635356236b00d0a8794ab68d733ad59
|
[
"MIT"
] | null | null | null |
class Element:
def __init__(self, name, single):
self.name = name
self.single = single
self.attrs = {}
self.content = ""
def set_attr(self, k, v): self.attrs[k] = v
    def get_attr(self, k): return self.attrs[k]
def tohtml(self):
attrs = (" " + " ".join([ "%s=\"%s\"" % (k,v)
for k,v in self.attrs.items() ])
if len(self.attrs) > 0
else "")
if self.single:
s = "<%s%s>" % (self.name, attrs)
return s
else:
s = "<%s%s>" % (self.name, attrs)
s += self.content
s += "</%s>" % self.name
return s
__str__ = tohtml; __repr__ = tohtml
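# --- Usage sketch: a paired tag with one attribute and text content.
def _example_element():
    a = Element('a', single=False)
    a.set_attr('href', 'https://example.com')
    a.content = 'link'
    return a.tohtml()  # '<a href="https://example.com">link</a>'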
| 29.583333 | 53 | 0.453521 | 710 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.061972 |
e960b0fabb4246bd94bb826b4cf1e4c34f2696b5
| 2,590 |
py
|
Python
|
vk_music/__main__.py
|
w1r2p1/vk_music
|
066fa623f87a6351846011c477cff2aad2943bc5
|
[
"MIT"
] | 7 |
2015-01-26T08:46:12.000Z
|
2020-08-29T13:07:07.000Z
|
vk_music/__main__.py
|
w1r2p1/vk_music
|
066fa623f87a6351846011c477cff2aad2943bc5
|
[
"MIT"
] | 3 |
2015-04-29T20:34:53.000Z
|
2015-07-08T08:43:47.000Z
|
vk_music/__main__.py
|
sashasimkin/vk_music
|
3814909ffd914103e80734e51b01dddb458b1bfe
|
[
"MIT"
] | 4 |
2016-04-24T14:09:48.000Z
|
2019-11-23T14:50:46.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import argparse
from subprocess import call
from .vk_music import VkMusic
from .exceptions import AlreadyRunningError
from .defaults import SafeFsStorage
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str, nargs='?', help="Directory for synchronization")
parser.add_argument("-uid", type=int, default=60411837, help="Vk user id") # Default is my VK id :-)
parser.add_argument("-client_id", type=int, default=2970439, help="Application id") # Application ID from VK
parser.add_argument("--threads", "-t", type=int, default=2, help="Number of threads to use")
parser.add_argument("-token", type=str, help="access token to use")
parser.add_argument("-token_dir", type=str, help="Directory where script will save token and temp data")
parser.add_argument("-f", dest='force', default=False, action='store_true', help="Ignore already running error")
parser.add_argument("-from", type=int, default=0, help="Start downloading from position")
parser.add_argument("-to", type=int, help="End downloading on position")
parser.add_argument("-redirect_url", type=str, help="Redirect url after getting token")
args = vars(parser.parse_args())
    # Drop arguments that were not passed on the command line
    args = {k: v for k, v in args.items() if v is not None}
workdir = args.get('dir', '').decode('utf-8') or os.getcwd() + '/Music'
try:
# Try to create directory if not exists
if not os.path.isdir(workdir):
os.makedirs(workdir)
# Need write access to that dir
os.chmod(workdir, 0o755)
if not os.access(workdir, os.W_OK):
raise Exception('Permission denied for dir %s' % workdir)
except Exception as e:
exit("Problem with directory '%s': %s" % (workdir, e))
storage = SafeFsStorage(workdir)
try:
with VkMusic(storage, **args) as manager:
# Start working
result = manager.synchronize()
try:
call(['notify-send',
'Vk Music',
'Saved: %(saved)s\n'
'Skipped: %(skipped)s\n'
'Removed: %(removed)s\n'
'Not removed: %(not_removed)s' % result])
except Exception:
pass
except AlreadyRunningError:
# If is running - terminate
print('Other sync process is running. Please wait')
if __name__ == '__main__':
main()
| 39.846154 | 116 | 0.622008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 896 | 0.345946 |
e962ef78829cd251169298d5da18fd8a33cb94ba
| 950 |
py
|
Python
|
misc/convert.py
|
Fusion-Goettingen/ExtendedTargetTrackingToolbox
|
945ede661e9258a8f1ca8abc00e25727fedf3ac7
|
[
"MIT"
] | 40 |
2018-07-30T13:07:23.000Z
|
2021-08-30T05:53:29.000Z
|
misc/convert.py
|
GitRooky/ExtendedTargetTrackingToolbox
|
945ede661e9258a8f1ca8abc00e25727fedf3ac7
|
[
"MIT"
] | null | null | null |
misc/convert.py
|
GitRooky/ExtendedTargetTrackingToolbox
|
945ede661e9258a8f1ca8abc00e25727fedf3ac7
|
[
"MIT"
] | 21 |
2018-10-03T11:50:00.000Z
|
2022-01-11T06:41:24.000Z
|
__author__ = "Jens Honer"
__copyright__ = "Copyright 2018, Jens Honer Tracking Toolbox"
__email__ = "-"
__license__ = "mit"
__version__ = "1.0"
__status__ = "Prototype"
import numpy as np
_bbox_sign_factors = np.asarray(
[
[1.0, 1.0],
[0.0, 1.0],
[-1.0, 1.0],
[-1.0, 0.0],
[-1.0, -1.0],
[0.0, -1.0],
[1.0, -1.0],
[1.0, 0.0],
], dtype='f4')
def convert_rectangle_to_eight_point(bboxes):
pt_set = np.zeros((len(bboxes), 8, 2))
pt_set[:] = bboxes['center_xy'][:, None, :]
for i, bbox in enumerate(bboxes):
s_phi_offset, c_phi_offset = np.sin(bbox['orientation']), np.cos(bbox['orientation'])
rot = np.array([[c_phi_offset, - s_phi_offset], [s_phi_offset, c_phi_offset]])
offset_xy = np.dot(_bbox_sign_factors * 0.5 * bbox['dimension'], rot.T)
pt_set[i, :, :] += offset_xy
return pt_set
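# --- Usage sketch (illustrative numbers): one axis-aligned 4x2 box at the
# origin; the eight returned points trace its corners and edge midpoints.
def _example_eight_point():
    bboxes = np.zeros(1, dtype=[('center_xy', 'f4', 2),
                                ('orientation', 'f4'),
                                ('dimension', 'f4', 2)])
    bboxes['dimension'] = [4.0, 2.0]
    return convert_rectangle_to_eight_point(bboxes)  # shape (1, 8, 2)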
| 27.142857 | 93 | 0.548421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.14 |
e96535fbd6c7f8ed1b7186f2611a4c30b772e4ba
| 866 |
py
|
Python
|
tbx/settings/dev.py
|
elviva404/wagtail-torchbox
|
718d9e2c4337073f010296932d369c726a01dbd3
|
[
"MIT"
] | 103 |
2015-02-24T17:58:21.000Z
|
2022-03-23T08:08:58.000Z
|
tbx/settings/dev.py
|
elviva404/wagtail-torchbox
|
718d9e2c4337073f010296932d369c726a01dbd3
|
[
"MIT"
] | 145 |
2015-01-13T17:13:43.000Z
|
2022-03-29T12:56:20.000Z
|
tbx/settings/dev.py
|
elviva404/wagtail-torchbox
|
718d9e2c4337073f010296932d369c726a01dbd3
|
[
"MIT"
] | 57 |
2015-01-03T12:00:37.000Z
|
2022-02-09T13:11:30.000Z
|
from .base import * # noqa
DEBUG = True
SECURE_SSL_REDIRECT = False
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "CHANGEME!!!"
# Enable FE component library
PATTERN_LIBRARY_ENABLED = True
INTERNAL_IPS = ("127.0.0.1", "10.0.2.2")
BASE_URL = "http://localhost:8000"
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
AUTH_PASSWORD_VALIDATORS = []
# Enable Wagtail's style guide in Wagtail's settings menu.
# http://docs.wagtail.io/en/stable/contributing/styleguide.html
INSTALLED_APPS += ["wagtail.contrib.styleguide"] # noqa
# Set URL for the preview iframe. Should point at Gatsby.
PREVIEW_URL = "http://localhost:8003/preview/"
MEDIA_PREFIX = BASE_URL
try:
from .local import * # noqa
except ImportError:
pass
| 23.405405 | 66 | 0.742494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 522 | 0.602771 |
e965d671abefc6771ef8f31d4904d2ca170eeb5c
| 84 |
py
|
Python
|
EKF/swig/python/test.py
|
fx815/EKF
|
ac33a6500d6cedd441758cae2f9aa7192f0f2a87
|
[
"BSD-3-Clause"
] | 38 |
2017-09-03T18:27:48.000Z
|
2022-01-25T04:56:57.000Z
|
EKF/swig/python/test.py
|
fx815/EKF
|
ac33a6500d6cedd441758cae2f9aa7192f0f2a87
|
[
"BSD-3-Clause"
] | 1 |
2020-08-24T03:28:49.000Z
|
2020-08-24T03:28:49.000Z
|
EKF/swig/python/test.py
|
fx815/EKF
|
ac33a6500d6cedd441758cae2f9aa7192f0f2a87
|
[
"BSD-3-Clause"
] | 10 |
2018-05-11T18:57:27.000Z
|
2022-03-10T02:53:54.000Z
|
import swig_example
swig_example.swig_example_hello()
swig_example.link_liba_hello()
| 28 | 33 | 0.892857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e9667bd424694f5af16378d0dfcd7bc9fa58a7a6
| 3,356 |
py
|
Python
|
src/base/local_dataset.py
|
wenyushi451/Deep-SAD-PyTorch
|
168d31f538a50fb029739206994ea5517d907853
|
[
"MIT"
] | null | null | null |
src/base/local_dataset.py
|
wenyushi451/Deep-SAD-PyTorch
|
168d31f538a50fb029739206994ea5517d907853
|
[
"MIT"
] | null | null | null |
src/base/local_dataset.py
|
wenyushi451/Deep-SAD-PyTorch
|
168d31f538a50fb029739206994ea5517d907853
|
[
"MIT"
] | null | null | null |
from torch.utils.data import Dataset
from torchvision.transforms import transforms
from sklearn.model_selection import train_test_split
import os
import glob
import torch
import numpy as np
from PIL import Image
class LocalDataset(Dataset):
def __init__(
self,
root: str,
dataset_name: str,
target_transform,
train=True,
random_state=None,
split=True,
random_effect=True,
):
        super(LocalDataset, self).__init__()
self.target_transform = target_transform
self.classes = [0, 1]
self.root = root
self.train = train # training set or test set
# self.dataset_path = os.path.join(self.root, self.dataset_name)
# class_idx/image
X = np.array(glob.glob(os.path.join(self.root, "*/*.[jp][pn][g]")))
y = [int(i.split("/")[-2]) for i in X]
y = np.array(y)
if split:
idx_norm = y == 0
idx_out = y != 0
            # 90% of the data for training and 10% for testing; keep outlier ratio
X_train_norm, X_test_norm, y_train_norm, y_test_norm = train_test_split(
X[idx_norm], y[idx_norm], test_size=0.1, random_state=random_state, stratify=y[idx_norm]
)
X_train_out, X_test_out, y_train_out, y_test_out = train_test_split(
X[idx_out], y[idx_out], test_size=0.1, random_state=random_state, stratify=y[idx_out]
)
X_train = np.concatenate((X_train_norm, X_train_out))
X_test = np.concatenate((X_test_norm, X_test_out))
y_train = np.concatenate((y_train_norm, y_train_out))
y_test = np.concatenate((y_test_norm, y_test_out))
if self.train:
self.data = X_train
self.targets = torch.tensor(y_train, dtype=torch.int64)
else:
self.data = X_test
self.targets = torch.tensor(y_test, dtype=torch.int64)
else:
self.data = X
self.targets = torch.tensor(y, dtype=torch.int64)
self.semi_targets = torch.zeros_like(self.targets)
# for training we will add brightness variance
if random_effect:
self.transform = transforms.Compose(
[
# transforms.ColorJitter(
# brightness=0.5 + int(np.random.rand(1)), contrast=0.5 + int(np.random.rand(1))
# ),
# saturation=0.5 + int(np.random.rand(1)),
# hue=0.5 + int(np.random.rand(1))),
transforms.Resize((224, 224)),
transforms.ToTensor(),
]
)
# for testing
else:
self.transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target, semi_target, index)
"""
data = Image.open(self.data[index])
data = self.transform(data)
sample, target, semi_target = data, 0 if self.targets[index] == 0 else 1, int(self.semi_targets[index])
return sample, target, semi_target, index
def __len__(self):
return len(self.data)
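# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of driving LocalDataset with a DataLoader; the
# root path and random_state below are assumptions, not values from the repo.
#
#     from torch.utils.data import DataLoader
#     dataset = LocalDataset(root="data/images", dataset_name="local",
#                            target_transform=None, train=True, random_state=42)
#     loader = DataLoader(dataset, batch_size=32, shuffle=True)
#     sample, target, semi_target, index = next(iter(loader))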
| 35.326316 | 111 | 0.56615 | 3,129 | 0.93236 | 0 | 0 | 0 | 0 | 0 | 0 | 591 | 0.176103 |
e9676f23c227a8e3dbd2af8223b0d6f349a5e56a
| 408 |
py
|
Python
|
envdsys/envdaq/migrations/0009_auto_20210415_2246.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | 1 |
2021-11-06T19:22:53.000Z
|
2021-11-06T19:22:53.000Z
|
envdsys/envdaq/migrations/0009_auto_20210415_2246.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | 25 |
2019-06-18T20:40:36.000Z
|
2021-07-23T20:56:48.000Z
|
envdsys/envdaq/migrations/0009_auto_20210415_2246.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-15 22:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('envdaq', '0008_interface'),
]
operations = [
migrations.AlterField(
model_name='interface',
name='config',
field=models.JSONField(default=dict, verbose_name='Configuration'),
),
]
| 21.473684 | 79 | 0.607843 | 315 | 0.772059 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.257353 |
e9681f3574652f7f41d0d0d5c77f92d6ff04b1eb
| 2,020 |
py
|
Python
|
works/migrations/0001_initial.py
|
wildcodear/wildcode_project
|
95d396ad3acbed08f607f618d6ada9d04b351bd8
|
[
"MIT"
] | null | null | null |
works/migrations/0001_initial.py
|
wildcodear/wildcode_project
|
95d396ad3acbed08f607f618d6ada9d04b351bd8
|
[
"MIT"
] | null | null | null |
works/migrations/0001_initial.py
|
wildcodear/wildcode_project
|
95d396ad3acbed08f607f618d6ada9d04b351bd8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('documents', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('address', models.TextField()),
('phone', models.CharField(max_length=50)),
('city', models.CharField(max_length=50)),
('country', models.CharField(max_length=50)),
('cuit', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('logo', models.ImageField(upload_to=b'companies_logos')),
('address', models.TextField(null=True, blank=True)),
('phone', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254)),
('web_url', models.URLField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Work',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('description', models.TextField(null=True, blank=True)),
('hours', models.FloatField(null=True, blank=True)),
('observations', models.TextField(null=True, blank=True)),
('proform', models.ForeignKey(related_name='works', to='documents.Proform')),
],
),
]
| 40.4 | 114 | 0.550495 | 1,911 | 0.94604 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.136139 |
e96a119d9fa6a43015c4274d98d22fcf31a25276
| 3,181 |
py
|
Python
|
2020/python/template.py
|
tadhg-ohiggins/advent-of-code
|
d0f113955940e69cbe0953607f62862f8a8bb830
|
[
"CC0-1.0"
] | 1 |
2021-12-04T18:09:44.000Z
|
2021-12-04T18:09:44.000Z
|
2020/python/template.py
|
tadhg-ohiggins/advent-of-code
|
d0f113955940e69cbe0953607f62862f8a8bb830
|
[
"CC0-1.0"
] | null | null | null |
2020/python/template.py
|
tadhg-ohiggins/advent-of-code
|
d0f113955940e69cbe0953607f62862f8a8bb830
|
[
"CC0-1.0"
] | null | null | null |
from tutils import pdb
from tutils import subprocess
from tutils import Counter
from tutils import partial
from tutils import reduce
from tutils import wraps
from tutils import count
from tutils import groupby
from tutils import product
from tutils import prod
from tutils import itemgetter
from tutils import Path
from tutils import ascii_lowercase
from tutils import ascii_digits
from tutils import Any
from tutils import Callable
from tutils import List
from tutils import Iterable
from tutils import IterableS
from tutils import Optional
from tutils import Sequence
from tutils import OInt
from tutils import ODict
from tutils import UListStr
from tutils import Tuple
from tutils import Union
from tutils import hexc
from tutils import compose_left
from tutils import concat
from tutils import curry
from tutils import do
from tutils import excepts
from tutils import iterate
from tutils import keyfilter
from tutils import pluck
from tutils import pipe
from tutils import sliding_window
from tutils import toolz_pick
from tutils import toolz_omit
from tutils import omit
from tutils import pick
from tutils import add_debug
from tutils import add_debug_list
from tutils import run_process
from tutils import until_stable
from tutils import oxford
from tutils import excepts_wrap
from tutils import nextwhere
from tutils import noncontinuous
from tutils import lnoncontinuous
from tutils import lfilter
from tutils import lcompact
from tutils import lmap
from tutils import lpluck
from tutils import lstrip
from tutils import splitstrip
from tutils import splitstriplines
from tutils import seq_to_dict
from tutils import split_to_dict
from tutils import c_map
from tutils import c_lmap
from tutils import is_char_az
from tutils import is_char_hex
from tutils import is_char_az09
from tutils import filter_str
from tutils import filter_az
from tutils import filter_az09
from tutils import filter_hex
from tutils import add_pprint
from tutils import add_pprinting
from tutils import make_incrementer
from tutils import adjacent_transforms
from tutils import load_input
from tutils import process_input
from tutils import tests
from tutils import load_and_process_input
from tutils import run_tests
""" END HELPER FUNCTIONS """
DAY = "00"
INPUT, TEST = f"input-{DAY}.txt", f"test-input-{DAY}.txt"
TA1 = None
TA2 = None
ANSWER1 = None
ANSWER2 = None
def process_one(data: Any) -> Any:
pdb.set_trace()
return
def process_two(data: Any) -> Any:
pdb.set_trace()
return
def cli_main() -> None:
input_funcs = [splitstriplines]
data = load_and_process_input(INPUT, input_funcs)
run_tests(TEST, TA1, TA2, ANSWER1, input_funcs, process_one, process_two)
answer_one = process_one(data)
if ANSWER1 is not None:
if answer_one != ANSWER1:
pdb.set_trace()
assert answer_one == ANSWER1
print("Answer one:", answer_one)
    if ANSWER1 is not None:  # part two is only attempted once part one is answered
answer_two = process_two(data)
if ANSWER2 is not None:
if answer_two != ANSWER2:
pdb.set_trace()
assert answer_two == ANSWER2
print("Answer two:", answer_two)
if __name__ == "__main__":
cli_main()
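# --- Illustrative sketch (editor's addition; the puzzle logic below is an
# assumption, shown only to demonstrate how the template is filled in) ---
#
#     def process_one(data: Any) -> Any:
#         # `data` arrives as stripped lines via splitstriplines in cli_main
#         return sum(int(line) for line in data)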
| 25.653226 | 77 | 0.786231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.034266 |
e96a9a36758616e89fb2f6e13a5fba67dd556005
| 323 |
py
|
Python
|
setup.py
|
alkaupp/weather
|
0aab40b26064ae8ebc4b0868da828a07a4c39631
|
[
"MIT"
] | null | null | null |
setup.py
|
alkaupp/weather
|
0aab40b26064ae8ebc4b0868da828a07a4c39631
|
[
"MIT"
] | null | null | null |
setup.py
|
alkaupp/weather
|
0aab40b26064ae8ebc4b0868da828a07a4c39631
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name='weather',
version='0.1',
description='CLI frontend for querying weather',
packages=['weather'],
entry_points={
'console_scripts': ['weather = weather.__main__:main']
},
author='Aleksi Kauppila',
author_email='[email protected]'
)
| 20.1875 | 62 | 0.656347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.470588 |
e96abeb27deaf4502ac786cdfa144e452aa4f116
| 271 |
py
|
Python
|
mordor_magic/mordor_app/admin.py
|
Far4Ru/mordor-magic-2
|
7082ae8cc0b12154f74f4f58f9cad8f0325a8f57
|
[
"MIT"
] | null | null | null |
mordor_magic/mordor_app/admin.py
|
Far4Ru/mordor-magic-2
|
7082ae8cc0b12154f74f4f58f9cad8f0325a8f57
|
[
"MIT"
] | null | null | null |
mordor_magic/mordor_app/admin.py
|
Far4Ru/mordor-magic-2
|
7082ae8cc0b12154f74f4f58f9cad8f0325a8f57
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import *
admin.site.register(CharacterEvent)
admin.site.register(Event)
admin.site.register(CharacterOwner)
admin.site.register(Character)
admin.site.register(User, UserAdmin)
| 27.1 | 47 | 0.830258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e96b4f43c95a1b4ce5857c21e88b3785232408aa
| 9,142 |
py
|
Python
|
main.py
|
Lmy0217/Flight
|
faf5045712c4d28e0ca3df408308a5e3b9bf8038
|
[
"MIT"
] | 2 |
2019-03-31T01:42:29.000Z
|
2019-05-16T06:31:50.000Z
|
main.py
|
Lmy0217/Flight
|
faf5045712c4d28e0ca3df408308a5e3b9bf8038
|
[
"MIT"
] | 1 |
2019-03-31T01:45:25.000Z
|
2019-04-17T05:46:35.000Z
|
main.py
|
Lmy0217/Flight
|
faf5045712c4d28e0ca3df408308a5e3b9bf8038
|
[
"MIT"
] | 1 |
2019-03-31T01:42:34.000Z
|
2019-03-31T01:42:34.000Z
|
#coding=utf-8
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import messagebox as mBox
from tkinter import filedialog
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import datetime
import threading
import flight
import outlier
import analytics
# Window title
win = tk.Tk()
win.title("机票数据爬取分析预测")
win.resizable(0, 0)
# Three tab pages
tabControl = ttk.Notebook(win)
tab1 = ttk.Frame(tabControl)
tabControl.add(tab1, text='爬取')
tab2 = ttk.Frame(tabControl)
tabControl.add(tab2, text='分析')
tab3 = ttk.Frame(tabControl)
tabControl.add(tab3, text='预测')
tabControl.pack(expand=1, fill="both")
# Parameter frame
monty = ttk.LabelFrame(tab1, text='')
monty.grid(column=0, row=0, padx=8, pady=4)
labelsFrame = ttk.LabelFrame(monty, text=' 参数 ')
labelsFrame.grid(column=0, row=0)
# City label
ttk.Label(labelsFrame, text="城市:").grid(column=0, row=0, sticky='W')
# City input box
city = tk.Text(labelsFrame, width=20, height=10)
city.insert(tk.END, "'SHA', 'SIA', 'BJS', 'CAN', 'SZX', 'CTU', 'HGH', 'WUH', 'CKG', 'TAO', 'CSX', 'NKG', 'XMN', 'KMG', 'DLC', 'TSN', 'CGO', 'SYX', 'TNA', 'FOC'")
city.grid(column=1, row=0, sticky='W')
# Start date label
ttk.Label(labelsFrame, text="起始日期:").grid(column=0, row=1, sticky='W')
# Start date input box
date1 = tk.StringVar()
da_days = datetime.datetime.now() + datetime.timedelta(days=1)
date1.set(da_days.strftime('%Y-%m-%d'))
date1Entered = ttk.Entry(labelsFrame, textvariable=date1)
date1Entered.grid(column=1, row=1, sticky='W')
# End date label
ttk.Label(labelsFrame, text="截止日期:").grid(column=0, row=2, sticky='W')
# End date input box
date2 = tk.StringVar()
da_days2 = datetime.datetime.now() + datetime.timedelta(days=1)
date2.set(da_days2.strftime('%Y-%m-%d'))
date2Entered = ttk.Entry(labelsFrame, textvariable=date2)
date2Entered.grid(column=1, row=2, sticky='W')
# Log box
scrolW = 91;
scrolH = 37;
scr = scrolledtext.ScrolledText(monty, width=scrolW, height=scrolH, wrap=tk.WORD)
scr.grid(column=3, row=0, sticky='WE', rowspan=5)
# Scrape the data
def spider_flight():
spider_flight.flight = flight.spider(city.get("0.0", "end"), date1.get(), date2.get(), scr)
spider_flight.flight = None
def run_spider_flight():
scr.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n爬取数据:\n城市:'
+ str(city.get("0.0", "end")) + '\n日期:' + str(date1.get()) + ' 至 ' + str(date2.get()) + '\n\n')
t = threading.Thread(target=spider_flight)
t.start()
# Scrape button
spider = ttk.Button(labelsFrame, text="爬取", width=10, command=run_spider_flight)
spider.grid(column=0, row=4, sticky='W')
# Save the file
def save_file():
if spider_flight.flight is not None:
fname = tk.filedialog.asksaveasfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
        if fname != '':
spider_flight.flight.save(fname)
scr.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n数据保存到 ' + fname + '\n\n')
else:
mBox.showwarning('Python Message Warning Box', '请先爬取数据!')
# Save button
save = ttk.Button(labelsFrame, text="保存", width=10, command=save_file)
save.grid(column=1, row=4, sticky='E')
for child in labelsFrame.winfo_children():
child.grid_configure(padx=8, pady=4)
for child in monty.winfo_children():
child.grid_configure(padx=3, pady=1)
# Parameter frame
monty2 = ttk.LabelFrame(tab2, text='')
monty2.grid(column=0, row=0, padx=8, pady=4)
labelsFrame2 = ttk.LabelFrame(monty2, text=' 参数 ')
labelsFrame2.grid(column=0, row=0)
# Log box
scrolW = 34;
scrolH = 25;
scr2 = scrolledtext.ScrolledText(monty2, width=scrolW, height=scrolH, wrap=tk.WORD)
scr2.grid(column=0, row=3, sticky='WE')
# Data label
ttk.Label(labelsFrame2, text="数据:").grid(column=0, row=0, sticky='W')
# Open a file
def data_file():
fname = tk.filedialog.askopenfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
    if fname != '':
data_file.outlier = outlier.Outlier(fname)
scr2.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n打开文件 ' + fname + '\n\n')
data_file.outlier = None
# Open-file button
data = ttk.Button(labelsFrame2, text="打开文件", width=10, command=data_file)
data.grid(column=1, row=0, sticky='E')
# Outlier count label
ttk.Label(labelsFrame2, text="异常数:").grid(column=0, row=1, sticky='W')
# Outlier count input box
diff = tk.IntVar()
diff.set(5)
diffEntered = ttk.Entry(labelsFrame2, textvariable=diff)
diffEntered.grid(column=1, row=1, sticky='W')
# Plot frame
def drawdiff():
try:
num_diff = int(diffEntered.get())
except:
num_diff = 5
diffEntered.delete(0, tk.END)
diffEntered.insert(0, 5)
drawdiff.f.clf()
drawdiff.out = data_file.outlier.extreme(drawdiff.f, scr2, num_diff)
drawdiff.canvas.show()
drawdiff.out = None
drawdiff.f = plt.figure()
drawdiff.canvas = FigureCanvasTkAgg(drawdiff.f, master=monty2)
drawdiff.canvas.show()
drawdiff.canvas.get_tk_widget().grid(column=1, row=0, rowspan=4)
def run_drawdiff():
if data_file.outlier is not None:
scr2.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n分析数据(设定 '
+ str(diffEntered.get()) + ' 个异常值)...\n\n异常值:\n')
t = threading.Thread(target=drawdiff)
t.start()
else:
mBox.showwarning('Python Message Warning Box', '请先打开文件!')
# Analyze button
da = ttk.Button(labelsFrame2, text="分析", width=10, command=run_drawdiff)
da.grid(column=0, row=2, sticky='W')
# Save the file
def save_file2():
if drawdiff.out is not None:
fname = tk.filedialog.asksaveasfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
        if fname != '':
with open(fname, 'w') as f1:
f1.write(str(drawdiff.out))
scr2.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n异常值保存到 ' + fname + '\n\n')
else:
mBox.showwarning('Python Message Warning Box', '请先分析数据!')
# Save button
save2 = ttk.Button(labelsFrame2, text="保存", width=10, command=save_file2)
save2.grid(column=1, row=2, sticky='E')
for child in labelsFrame2.winfo_children():
child.grid_configure(padx=8, pady=4)
for child in monty2.winfo_children():
child.grid_configure(padx=8, pady=4)
# Parameter frame
monty3 = ttk.LabelFrame(tab3, text='')
monty3.grid(column=0, row=0, padx=8, pady=4)
labelsFrame3 = ttk.LabelFrame(monty3, text=' 参数 ')
labelsFrame3.grid(column=0, row=0)
# Log box
scrolW = 34;
scrolH = 25;
scr3 = scrolledtext.ScrolledText(monty3, width=scrolW, height=scrolH, wrap=tk.WORD)
scr3.grid(column=0, row=3, sticky='WE')
# Data label
ttk.Label(labelsFrame3, text="数据:").grid(column=0, row=0, sticky='W')
# Open a file
def data_file2():
fname = tk.filedialog.askopenfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
    if fname != '':
data_file2.analytics = analytics.Analytics(fname)
scr3.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n打开文件 ' + fname + '\n\n')
data_file2.analytics = None
# Open-file button
data2 = ttk.Button(labelsFrame3, text="打开文件", width=10, command=data_file2)
data2.grid(column=1, row=0, sticky='E')
# Forecast days label
ttk.Label(labelsFrame3, text="预测天数:").grid(column=0, row=1, sticky='W')
# Forecast days input box
days = tk.IntVar()
days.set(30)
daysEntered = ttk.Entry(labelsFrame3, textvariable=days)
daysEntered.grid(column=1, row=1, sticky='W')
# Plot frame
def drawpredict():
try:
num_day = int(daysEntered.get())
except:
num_day = 30
daysEntered.delete(0, tk.END)
daysEntered.insert(0, 30)
    # Clear the figure so that successive plots do not overlap
drawpredict.f.clf()
drawpredict.out = data_file2.analytics.predict(num_day, scr3)
drawpredict.canvas.show()
drawpredict.out = None
drawpredict.f = plt.figure()
drawpredict.canvas = FigureCanvasTkAgg(drawpredict.f, master=monty3)
drawpredict.canvas.show()
drawpredict.canvas.get_tk_widget().grid(column=1, row=0, rowspan=4)
def run_drawpredict():
if data_file2.analytics is not None:
scr3.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n分析数据(设定预测 '
+ str(daysEntered.get()) + ' 天)...\n\n训练过程:\n轮次/总轮次 [损失]\n')
t = threading.Thread(target=drawpredict)
t.start()
else:
mBox.showwarning('Python Message Warning Box', '请先打开文件!')
# Predict button
pr = ttk.Button(labelsFrame3, text="预测", width=10, command=run_drawpredict)
pr.grid(column=0, row=2, sticky='W')
# Save the file
def save_file3():
    if drawpredict.out is not None:
        fname = tk.filedialog.asksaveasfilename(filetypes=[("JSON", ".json")], defaultextension='.json')
        if fname != '':  # guard against a cancelled dialog, as the sibling save functions do
            with open(fname, 'w') as f1:  # open the file
                f1.write(str(drawpredict.out))
            scr3.insert(tk.END, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n训练过程和预测结果保存到 ' + fname + '\n\n')
    else:
        mBox.showwarning('Python Message Warning Box', '请先预测数据!')
# Save button
save = ttk.Button(labelsFrame3, text="保存", width=10, command=save_file3)
save.grid(column=1, row=2, sticky='E')
for child in labelsFrame3.winfo_children():
child.grid_configure(padx=8, pady=4)
for child in monty3.winfo_children():
child.grid_configure(padx=8, pady=4)
if __name__ == "__main__":
win.mainloop()
| 27.371257 | 161 | 0.669438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,029 | 0.20599 |
e96b8708dc8be78814c697d042595105e2d873c2
| 80 |
py
|
Python
|
Getting_Started_With_Raspberry_Pi_Pico/variable/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 665 |
2017-09-27T21:20:14.000Z
|
2022-03-31T09:09:25.000Z
|
Getting_Started_With_Raspberry_Pi_Pico/variable/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 641 |
2017-10-03T19:46:37.000Z
|
2022-03-30T18:28:46.000Z
|
Getting_Started_With_Raspberry_Pi_Pico/variable/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 734 |
2017-10-02T22:47:38.000Z
|
2022-03-30T14:03:51.000Z
|
"""Example of assigning a variable."""
user_name = input("What is your name? ")
| 26.666667 | 40 | 0.6875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.7375 |
e96d84302227c0aff1faeef0969afac44cd9a679
| 228 |
py
|
Python
|
sitator/visualization/__init__.py
|
lekah/sitator
|
0f9c84989758eb7b76be8104a94a8d6decd27b55
|
[
"MIT"
] | 8 |
2018-10-05T18:02:24.000Z
|
2021-02-22T20:24:58.000Z
|
sitator/visualization/__init__.py
|
lekah/sitator
|
0f9c84989758eb7b76be8104a94a8d6decd27b55
|
[
"MIT"
] | 6 |
2019-02-21T04:33:01.000Z
|
2021-01-06T20:05:25.000Z
|
sitator/visualization/__init__.py
|
lekah/sitator
|
0f9c84989758eb7b76be8104a94a8d6decd27b55
|
[
"MIT"
] | 6 |
2018-08-11T21:43:59.000Z
|
2021-12-21T06:32:12.000Z
|
from .common import layers, grid, plotter, DEFAULT_COLORS, set_axes_equal
from .atoms import plot_atoms, plot_points
from .SiteNetworkPlotter import SiteNetworkPlotter
from .SiteTrajectoryPlotter import SiteTrajectoryPlotter
| 28.5 | 73 | 0.855263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e96dd4f2640b513649fb3793b8d1056d51d5824e
| 1,525 |
py
|
Python
|
src/futebol_wss_agent/lib/verification.py
|
nerds-ufes/futebol-optical-agent
|
405117b152ce96f09770ff5ca646bd18a72ee2fa
|
[
"Apache-2.0"
] | null | null | null |
src/futebol_wss_agent/lib/verification.py
|
nerds-ufes/futebol-optical-agent
|
405117b152ce96f09770ff5ca646bd18a72ee2fa
|
[
"Apache-2.0"
] | null | null | null |
src/futebol_wss_agent/lib/verification.py
|
nerds-ufes/futebol-optical-agent
|
405117b152ce96f09770ff5ca646bd18a72ee2fa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017-2022 Anderson Bravalheri, University of Bristol
# High Performance Networks Group
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class UndefinedAdapter(RuntimeWarning):
"""Avoid committing grid if adapter not specified."""
DEFAULT_MESSAGE = "Please use a valid adapter when defining the grid."
def __init__(self, message=DEFAULT_MESSAGE, *args, **kwargs):
super(UndefinedAdapter, self).__init__(message, *args, **kwargs)
class FrozenObject(AttributeError):
"""Cannot set attributes in frozen objects."""
class ReadonlyAttribute(AttributeError):
"""Should be thrown when some tries to access a readonly attribute."""
class UnsupportedResolution(ValueError):
"""Grid should respect vendor grid specification."""
class OverlappedChannels(ValueError):
"""Grid channels must be disjoint."""
class OutOfRange(ValueError):
"""Value should respect range specification by vendor."""
| 34.659091 | 74 | 0.727213 | 781 | 0.512131 | 0 | 0 | 0 | 0 | 0 | 0 | 1,079 | 0.707541 |
e96debb65a28b71e00c0a2a49cd0ca34ceacdd69
| 449 |
py
|
Python
|
api/compat.py
|
fancystats/api
|
298ae6d71fa37f649bbd61ad000767242f49a698
|
[
"MIT"
] | 1 |
2015-03-20T20:35:22.000Z
|
2015-03-20T20:35:22.000Z
|
api/compat.py
|
fancystats/api
|
298ae6d71fa37f649bbd61ad000767242f49a698
|
[
"MIT"
] | null | null | null |
api/compat.py
|
fancystats/api
|
298ae6d71fa37f649bbd61ad000767242f49a698
|
[
"MIT"
] | null | null | null |
"""
Python 2/3 Compatibility
========================
Not sure we need to support anything but Python 2.7 at this point, but copied
this module over from flask-peewee for the time being.
"""
import sys
PY2 = sys.version_info[0] == 2
if PY2:
text_type = unicode
string_types = (str, unicode)
unichr = unichr
reduce = reduce
else:
text_type = str
string_types = (str, )
unichr = chr
from functools import reduce
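# --- Usage sketch (editor's addition; the import path "api.compat" is assumed
# from the repository layout) ---
#
#     from api.compat import string_types, text_type
#     assert isinstance("abc", string_types)
#     label = text_type(42)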
| 17.96 | 78 | 0.639198 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 194 | 0.432071 |
e96f7f5812e754a8c8dec16943815bc6604a8f49
| 216 |
py
|
Python
|
rxkcd/models.py
|
aeternalis1/Relevant-XKCD
|
a9145974453b94ecf77a587b83bd69d974f14380
|
[
"MIT"
] | null | null | null |
rxkcd/models.py
|
aeternalis1/Relevant-XKCD
|
a9145974453b94ecf77a587b83bd69d974f14380
|
[
"MIT"
] | null | null | null |
rxkcd/models.py
|
aeternalis1/Relevant-XKCD
|
a9145974453b94ecf77a587b83bd69d974f14380
|
[
"MIT"
] | null | null | null |
class Comic:
def __init__(self, comic_num):
self.id = comic_num
self.title = ""
self.title_text = ""
self.transcript = ""
self.explanation = ""
self.img_url = ""
self.og_title = ""
self.og_ttext = ""
| 21.6 | 31 | 0.62963 | 216 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.064815 |
e96ffd9e458abb20cec71135158a8cf1ce09e9d1
| 888 |
py
|
Python
|
ElevatorBot/commands/funStuff/ticTacToe/vsAI.py
|
LukasSchmid97/destinyBloodoakStats
|
1420802ce01c3435ad5c283f44eb4531d9b22c38
|
[
"MIT"
] | 3 |
2019-10-19T11:24:50.000Z
|
2021-01-29T12:02:17.000Z
|
ElevatorBot/commands/funStuff/ticTacToe/vsAI.py
|
LukasSchmid97/destinyBloodoakStats
|
1420802ce01c3435ad5c283f44eb4531d9b22c38
|
[
"MIT"
] | 29 |
2019-10-14T12:26:10.000Z
|
2021-07-28T20:50:29.000Z
|
ElevatorBot/commands/funStuff/ticTacToe/vsAI.py
|
LukasSchmid97/destinyBloodoakStats
|
1420802ce01c3435ad5c283f44eb4531d9b22c38
|
[
"MIT"
] | 2 |
2019-10-13T17:11:09.000Z
|
2020-05-13T15:29:04.000Z
|
# from discord.ext.commands import Cog
# from discord_slash import SlashContext, cog_ext
# from discord_slash.utils.manage_commands import create_option
#
#
# class TicTacToeAI(Cog):
# def __init__(self, client):
# self.client = client
#
# @cog_ext.cog_subcommand(
# base="tictactoe",
# base_description="You know and love it - TicTacToe",
# name="computer",
# description="Try to beat me in a tic tac toe game",
# options=[
# create_option(
# name="easy_mode",
# description="Set this to true if you are too weak for the normal mode",
# option_type=5,
# required=False,
# ),
# ],
# )
# async def _tictactoe_ai(self, ctx: SlashContext, easy_mode: bool = False):
# pass
#
#
# def setup(client):
# TicTacToeAI(client)
| 29.6 | 89 | 0.581081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 859 | 0.967342 |
e97022aba46b50c4fc79f34b4e0641ec360d25a6
| 3,254 |
bzl
|
Python
|
infra-sk/karma_test/index.bzl
|
bodymovin/skia-buildbot
|
1570e4e48ecb330750264d4ae6a875b5e49a37fe
|
[
"BSD-3-Clause"
] | null | null | null |
infra-sk/karma_test/index.bzl
|
bodymovin/skia-buildbot
|
1570e4e48ecb330750264d4ae6a875b5e49a37fe
|
[
"BSD-3-Clause"
] | null | null | null |
infra-sk/karma_test/index.bzl
|
bodymovin/skia-buildbot
|
1570e4e48ecb330750264d4ae6a875b5e49a37fe
|
[
"BSD-3-Clause"
] | null | null | null |
"""This module defines the karma_test rule."""
load("@infra-sk_npm//@bazel/typescript:index.bzl", "ts_library")
load("@infra-sk_npm//@bazel/rollup:index.bzl", "rollup_bundle")
load("@infra-sk_npm//karma:index.bzl", _generated_karma_test = "karma_test")
def karma_test(name, srcs, deps, entry_point = None):
"""Runs unit tests in a browser with Karma and the Mocha test runner.
When executed with `bazel test`, a headless Chrome browser will be used. This supports testing
multiple karma_test targets in parallel, and works on RBE.
When executed with `bazel run`, it prints out a URL to stdout that can be opened in the browser,
e.g. to debug the tests using the browser's developer tools. Source maps are generated.
When executed with `ibazel test`, the test runner never exits, and tests will be rerun every
time a source file is changed.
When executed with `ibazel run`, it will act the same way as `bazel run`, but the tests will be
rebuilt automatically when a source file changes. Reload the browser page to see the changes.
Args:
name: The name of the target.
srcs: The *.ts test files.
deps: The ts_library dependencies for the source files.
entry_point: File in srcs to be used as the entry point to generate the JS bundle executed by
the test runner. Optional if srcs contains only one file.
"""
if len(srcs) > 1 and not entry_point:
fail("An entry_point must be specified when srcs contains more than one file.")
if entry_point and entry_point not in srcs:
fail("The entry_point must be included in srcs.")
if len(srcs) == 1:
entry_point = srcs[0]
ts_library(
name = name + "_lib",
srcs = srcs,
deps = deps + [
# Add common test dependencies for convenience.
"@infra-sk_npm//@types/mocha",
"@infra-sk_npm//@types/chai",
"@infra-sk_npm//@types/sinon",
],
)
rollup_bundle(
name = name + "_bundle",
entry_point = entry_point,
deps = [
name + "_lib",
"@infra-sk_npm//@rollup/plugin-node-resolve",
"@infra-sk_npm//@rollup/plugin-commonjs",
"@infra-sk_npm//rollup-plugin-sourcemaps",
],
format = "umd",
config_file = "//infra-sk:rollup.config.js",
)
# This rule is automatically generated by rules_nodejs from Karma's package.json file.
_generated_karma_test(
name = name,
size = "large",
data = [
name + "_bundle",
"//infra-sk/karma_test:karma.conf.js",
"@infra-sk_npm//karma-chrome-launcher",
"@infra-sk_npm//karma-sinon",
"@infra-sk_npm//karma-mocha",
"@infra-sk_npm//karma-chai",
"@infra-sk_npm//karma-chai-dom",
"@infra-sk_npm//karma-spec-reporter",
"@infra-sk_npm//mocha",
],
templated_args = [
"start",
"$(execpath //infra-sk/karma_test:karma.conf.js)",
"$$(rlocation $(location %s_bundle))" % name,
],
tags = [
# Necessary for it to work with ibazel.
"ibazel_notify_changes",
],
)
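# --- Example BUILD usage (editor's addition; the target and file names below
# are assumptions, not part of this module) ---
#
#     load("//infra-sk/karma_test:index.bzl", "karma_test")
#
#     karma_test(
#         name = "mymodule_test",
#         srcs = ["mymodule_test.ts"],
#         deps = [":mymodule_ts_lib"],
#     )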
| 36.977273 | 100 | 0.609711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,208 | 0.678549 |
e970a8957b84490bbe0b79a62e25d6fddc55f490
| 5,894 |
py
|
Python
|
stats/ClassicAnalyzerStats.py
|
arndff/fpl-rivals-tracker
|
311b932ab7c07b03c1676e5a971df13e652a1b7b
|
[
"Apache-2.0"
] | 4 |
2019-02-06T10:42:50.000Z
|
2021-02-17T21:09:26.000Z
|
stats/ClassicAnalyzerStats.py
|
arndff/fpl-rivals-tracker
|
311b932ab7c07b03c1676e5a971df13e652a1b7b
|
[
"Apache-2.0"
] | null | null | null |
stats/ClassicAnalyzerStats.py
|
arndff/fpl-rivals-tracker
|
311b932ab7c07b03c1676e5a971df13e652a1b7b
|
[
"Apache-2.0"
] | 1 |
2021-02-17T21:09:27.000Z
|
2021-02-17T21:09:27.000Z
|
from fileutils.fileutils import save_output_to_file, select_option_from_menu
class ClassicAnalyzerStats:
def __init__(self, data, current_event, output_file_name):
self.__data = data
self.__current_event = current_event
self.__output_file_name = output_file_name
self.__output = []
self.__options = self.__init_options()
self.__append_options_to_output()
def save_stats_output_to_file(self):
save_output_to_file(self.__output_file_name, "a+", self.__output)
def stats_menu(self):
while True:
exception_msg = "\n[!] Please enter an integer from 1 to 10."
option = select_option_from_menu(self.__options, exception_msg)
self.__output.append("Selected option: {}".format(option))
if option == -1:
continue
if option == 1:
self.__calculate_average_points()
elif option == 2:
self.__print_captains((list(map(lambda x: x.captain_name, self.__data))))
elif option == 3:
self.__print_captains((list(map(lambda x: x.vice_captain_name, self.__data))))
elif option == 4:
self.__print_chip_usage_whole_season()
elif option == 5:
self.__print_chip_usage_current_event()
elif option == 6:
self.__count_managers_made_transfer()
elif option == 7:
self.__count_managers_took_hit()
elif option == 8:
self.__print_team_value(max)
elif option == 9:
self.__print_team_value(min)
elif option == 10:
self.__output.append("")
break
else:
print("\n[!] Invalid option. Try again!")
@staticmethod
def init_a_dict(key, dictionary):
if key not in dictionary:
dictionary[key] = 1
else:
dictionary[key] += 1
def print_chips(self, chips):
for chip in chips:
string = "{}({})".format(chip, chips[chip])
print(string, end=" ")
self.__output.append(string)
print()
self.__output.append("")
def __init_options(self):
options = ["\n* Please choose an option from 1 to 10:",
"1) Sample's average score",
"2) Most captained players",
"3) Most vice-captained players",
"4) Chips usage during the whole season",
"5) Chips usage during GW{}".format(self.__current_event),
"6) Count of managers made at least one transfer",
"7) Count of managers took at least one hit",
"8) Richest manager(s)",
"9) Poorest manager(s)",
"10) Exit"]
return options
def __calculate_average_points(self):
managers_count = len(self.__data)
total_points = 0
for manager in self.__data:
total_points += manager.gw_points()
total_points -= manager.gw_hits
average_points = total_points / managers_count
result = "{:.2f} points".format(average_points)
print(result)
self.__output.append(result)
self.__output.append("")
def __print_captains(self, list_of_captains):
captains = {}
for captain in list_of_captains:
self.init_a_dict(captain, captains)
captains_sorted = [(captain, captains[captain]) for captain in sorted(captains, key=captains.get, reverse=True)]
for key, value in captains_sorted:
captain = "{}({})".format(key, value)
print(captain, end=" ")
self.__output.append(captain)
print()
self.__output.append("")
def __print_chip_usage_whole_season(self):
chips = {}
for manager in self.__data:
for chip in manager.used_chips_by_gw:
self.init_a_dict(chip, chips)
self.print_chips(chips)
def __print_chip_usage_current_event(self):
active_chips = {}
for manager in self.__data:
active_chip = manager.active_chip
if active_chip != "None":
self.init_a_dict(active_chip, active_chips)
if len(active_chips) < 1:
result = "No manager has used any chip in GW{}".format(self.__current_event)
self.__log_string(result)
else:
self.print_chips(active_chips)
def __count_managers_made_transfer(self):
result = len(list(filter(lambda x: x.gw_transfers > 0, self.__data)))
if result == 1:
managers_count = "1 manager"
else:
managers_count = "{} managers".format(result)
self.__log_string(managers_count)
def __count_managers_took_hit(self):
result = len(list(filter(lambda x: x.gw_hits > 0, self.__data)))
managers_count = "{} managers".format(result)
self.__log_string(managers_count)
    def __print_team_value(self, extremum):
        # extremum is either the built-in max (richest) or min (poorest)
        team_values = list(map(lambda x: x.team_value, self.__data))
        extreme_value = extremum(team_values)
        extreme_managers = list(filter(lambda x: x.team_value == extreme_value, self.__data))
        extreme_managers_names = list(map(lambda x: x.manager_name, extreme_managers))
        result = ", ".join(extreme_managers_names)
        result_string = "{} ({}M)".format(result, format(extreme_value, ".1f"))
        self.__log_string(result_string)
def __append_options_to_output(self):
self.__output.append("")
[self.__output.append(option) for option in self.__options]
self.__output.append("")
def __log_string(self, string):
print(string)
self.__output.append(string)
self.__output.append("")
| 33.68 | 120 | 0.588904 | 5,814 | 0.986427 | 0 | 0 | 164 | 0.027825 | 0 | 0 | 601 | 0.101968 |
e971243f262537809157c1b4baa49f7bcb8914f9
| 88 |
py
|
Python
|
xallennlp/training/__init__.py
|
himkt/xallennlp
|
073a1475398e59c70230623016f4036432b9c186
|
[
"MIT"
] | null | null | null |
xallennlp/training/__init__.py
|
himkt/xallennlp
|
073a1475398e59c70230623016f4036432b9c186
|
[
"MIT"
] | null | null | null |
xallennlp/training/__init__.py
|
himkt/xallennlp
|
073a1475398e59c70230623016f4036432b9c186
|
[
"MIT"
] | null | null | null |
import xallennlp.training.mlflow_callback
import xallennlp.training.mlflow_checkpointer
| 29.333333 | 45 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e972ad4a4720505a28ff8ccfa9d6a0290e94f706
| 11,599 |
py
|
Python
|
colabutil.py
|
cmcheungMOOC/colabUtil
|
c08da88ae56d461404960de3426344e7da49f3db
|
[
"MIT"
] | 1 |
2018-08-07T05:34:11.000Z
|
2018-08-07T05:34:11.000Z
|
colabutil.py
|
cmcheungMOOC/colabUtil
|
c08da88ae56d461404960de3426344e7da49f3db
|
[
"MIT"
] | null | null | null |
colabutil.py
|
cmcheungMOOC/colabUtil
|
c08da88ae56d461404960de3426344e7da49f3db
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""colabUtil.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1KX9x-rqyj0XfUkLtfOVh8t8T_kW0hs0u
#Colab Util
This is a collection of utility functions that simplifies data science research in Colab. I wrote this while working through *Deep Learning with Python* by François Chollet.
Most of createPyDrive is from https://gist.github.com/rdinse/159f5d77f13d03e0183cb8f7154b170a
##Usage
###Pull in py files into colab. The content will be in colabUtil folder.
```python
!pip install -U -q PyDrive
!git clone https://github.com/cmcheungMOOC/colabUtil.git
```
###Add colab directory to module path
```python
import sys
sys.path.insert(0, '/content/colabUtil')
```
###Share and enjoy!
```python
import colabutil as cu
cu.setupGlove()
cu.setupAclImdb()
cu.setupKaggleCatsAndDogs()
cu.restore('CNN_Results')
cu.save('CNN_Results')
```
##Assumptions
I have made the following assumptions to allow me to simplify my code. This code is not meant for general usage.
* Colab VMs are reliable
* Colab VMs will be recycled
These assumptions simply mean that you can count on the VM to do work correctly while it is still assigned to you, but the VM will be yanked from under you. So, it is necessary to back up intermediate state information to persistent storage such as a Google drive.
The transient nature of your Colab workspace means that there is little reason for complicated directory hierarchies. After all, anything you built up will vanish overnight. This means that a simple directory hierarchy supporting the tasks at hand is all you need.
##Directory Hierarchy
Colab workspace is rooted at /content. This is our default directory. In addition, we use /content/dataset to store downloaded datasets. Intermediate states of an ML algorithm are written onto /content. All top-level content in /content can be zipped up and saved. The content can be restored when needed. Note that only the latest state persists in the Google drive. Unfortunately, I know of no easy way to get the title of a Jupyter notebook. So, a user-defined name needs to be chosen for the backup zip file.
## Utility Functions
"""
#@title Download Dataset
import requests, os
def download(url, overwrite=False):
baseName = os.path.basename(url)
path = os.path.join(os.getcwd(), baseName)
print('Downloading', url, 'to', path)
if os.path.isfile(path):
if not overwrite:
print(path, 'already exists')
return path
r = requests.get(url, allow_redirects=True)
open(path, 'wb').write(r.content)
return path
#@title Test Download { run: "auto", vertical-output: true }
url = "" #@param ["", "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz", "http://nlp.stanford.edu/data/glove.6B.zip"]
overwrite = False #@param {type:"boolean"}
if url != "":
download(url, overwrite)
os.listdir()
"""###Untar Dataset into Current Working Directory
Currently, untar only support *.tar.gz. This will be extended only if there is a real use case.
"""
import tarfile, os, shutil
def untar(gzName, dstDir='', skipIfDstDirExists=False):
if dstDir == '':
dstDir = os.path.dirname(gzName)
if dstDir == '':
dstDir = os.getcwd()
if skipIfDstDirExists and os.path.isdir(dstDir):
print(dstDir, 'exists')
return dstDir
print('Extracting', gzName, 'to', dstDir)
t = tarfile.open(name=gzName, mode='r:gz')
#topLevelDirInTar = os.path.commonprefix(t.getnames())
#print('topLevelDirInTar', topLevelDirInTar)
t.extractall(dstDir)
return dstDir
#@title Test Untar { run: "auto", vertical-output: true }
gzName = "" #@param ["", "aclImdb_v1.tar.gz"]
dstDir = "" #@param ["", ".", "/content/dataset"]
if gzName != "":
d = untar(gzName, dstDir)
print(d)
print(os.listdir(d))
#@title Zip Up Content of a Specified Directory
import zipfile, os
def zip(srcDir='.', mode='w'):  # NOTE: shadows the built-in zip() within this module
print('zip', srcDir, mode)
if not os.path.isdir(srcDir):
print(srcDir, 'is not a dir')
return None
if srcDir == '.':
srcDir = os.getcwd()
zipName = srcDir + '.zip'
print('Creating', zipName, 'from', srcDir)
with zipfile.ZipFile(zipName, mode=mode) as zf:
compression = zipfile.ZIP_DEFLATED
for fname in os.listdir(srcDir):
if os.path.isdir(fname):
print('Skipping', fname)
continue
_, ext = os.path.splitext(fname)
if ext.lower() in ['.zip', '.gz']:
print('Skipping', fname)
continue
path = os.path.join(srcDir, fname)
zf.write(path, compress_type=compression)
print(path, 'is added to', zipName)
return zipName
#@title Test Zip { run: "auto" }
srcDir = "" #@param ["", ".", "/content", "/content/datalab"]
if srcDir != '':
if not os.path.isdir(srcDir):
os.mkdir(srcDir)
print(zip(srcDir))
#@title Unzip Content
import os, zipfile, shutil
def unzip(zipName, dstDir = '', skipIfDstDirExists=False):
if dstDir == '':
dstDir = os.path.dirname(zipName)
if skipIfDstDirExists and os.path.isdir(dstDir):
print(dstDir, 'exists')
return dstDir
print('Extracting', zipName, 'to', dstDir)
z = zipfile.ZipFile(zipName, 'r')
z.extractall(dstDir)
return dstDir
#@title Test Unzip { run: "auto", vertical-output: true }
zipName = "" #@param ["", "glove.6B.zip", "/content/datalab.zip"]
dstDir = "" #@param ["", ".", "/content/dataset/glove.6B", "/content/dataset", "datalab", "a/b", "dataset/tmp"]
if zipName != "":
d = unzip(zipName, dstDir)
print(d)
print(os.listdir(d))
os.listdir(d)
#@title Setup GLOVE
def setupGlove():
zipFile = download('http://nlp.stanford.edu/data/glove.6B.zip')
unzip(zipFile, dstDir='/content/dataset/glove.6B', skipIfDstDirExists=True)
#@title Test GLOVE Setup { run: "auto", vertical-output: true }
test = False #@param {type:"boolean"}
if test:
setupGlove()
#@title Setup ACLIMDB
def setupAclImdb():
gzFile = download('http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz')
untar(gzFile, dstDir='/content/dataset/aclImdb_v1', skipIfDstDirExists=True)
#@title Test ACLIMDB Setup { run: "auto", vertical-output: true }
test = False #@param {type:"boolean"}
if test:
setupAclImdb()
#@title Setup Kaggle Cats and Dogs
def setupKaggleCatsAndDogs():
zipFile = download('https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip')
unzip(zipFile, dstDir='/content/dataset/kagglecatsanddogs_3367a',
skipIfDstDirExists=True)
#@title Test Kaggle Cats and Dogs Setup { run: "auto", vertical-output: true }
test = False #@param {type:"boolean"}
if test:
setupKaggleCatsAndDogs()
"""##Pydrive Utilities
https://gsuitedevs.github.io/PyDrive/docs/build/html/index.html
Content of a specified directory is saved to or restored from a Google drive.
Most of createPyDrive is from https://gist.github.com/rdinse/159f5d77f13d03e0183cb8f7154b170a
"""
#@title Authenticate and Create the PyDrive Client
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
def createPyDrive():
print('createPyDrive')
mycreds_file = 'mycreds_file.json'
gauth = GoogleAuth()
# https://stackoverflow.com/a/24542604/5096199
# Try to load saved client credentials
gauth.LoadCredentialsFile(mycreds_file)
if gauth.credentials is None:
# Authenticate if they're not there
auth.authenticate_user()
gauth.credentials = GoogleCredentials.get_application_default()
print(gauth.credentials)
elif gauth.access_token_expired:
# Refresh them if expired
gauth.Refresh()
else:
# Initialize the saved creds
gauth.Authorize()
# Save the current credentials to a file
gauth.SaveCredentialsFile(mycreds_file)
return GoogleDrive(gauth)
#@title Test CreatePyDrive { run: "auto", vertical-output: true }
test = False #@param {type:"boolean"}
if test:
drive = createPyDrive()
os.listdir()
#@title Create & Upload a File
def uploadFile(drive, fname):
print('uploadFile', fname)
uploaded = drive.CreateFile({'title': fname})
uploaded.SetContentFile(fname)
uploaded.Upload()
print('Uploaded {} with ID {}'.format(fname, uploaded.get('id')))
#@title Test UploadFile to Google Drive { run: "auto", vertical-output: true }
fname = "" #@param ["", "a.txt"]
if fname != '':
if not os.path.exists(fname):
print('Creating', fname)
with open(fname, 'w') as fp:
fp.write('abc')
uploadFile(drive, fname)
#@title Find a File by Name in the Google Drive
def findFile(drive, fname):
file_list = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
for file1 in file_list:
if file1['title'] == fname:
print('title: %s, id: %s' % (file1['title'], file1['id']))
return file1
#@title Test Find File in Google Drive { run: "auto", vertical-output: true }
fname = "" #@param ["", "a.txt"]
if fname != '':
findFile(drive, fname)
#@title Download a File and Optionally Trash it
def downloadFile(drive, fname, trashIt=False):
print('downloadFile', fname)
file1 = findFile(drive, fname)
if not file1:
print(fname, 'not found')
return None
downloaded = drive.CreateFile({'id': file1['id']})
downloaded.GetContentFile(fname)
if trashIt:
downloaded.Trash()
print(fname, 'is moved to trash')
return file1['title']
#@title Test Download from Google Drive { run: "auto", vertical-output: true }
fname = "" #@param ["", "a.txt"]
trashIt = False #@param {type:"boolean"}
if fname != '':
print(downloadFile(drive, fname, trashIt))
#@title Google Drive Class
class GDrive:
def __init__(self):
self.drive = createPyDrive()
def upload(self, fname):
uploadFile(self.drive, fname)
def download(self, fname, trashIt=True):
return downloadFile(self.drive, fname, trashIt)
#@title Test Google Drive Class { run: "auto", vertical-output: true }
fname = "" #@param ["", "a.txt"]
if fname != '':
if not os.path.exists(fname):
with open(fname, 'w') as fp:
fp.write('abc')
gd = GDrive()
gd.upload(fname)
gd.download(fname)
"""###Save and Restore the Content of a Directory"""
#@title Save Directory to Google Drive
def save(srcDirName):
if '/' in srcDirName:
print('Use only the name of the dir, not the path to it')
return
zipName = zip(srcDirName)
gd = GDrive()
gd.upload(zipName)
#@title Test Directory Save { run: "auto", vertical-output: true }
srcDirName = "" #@param ["", "datalab", "/content/datalab"]
if srcDirName != '':
if not os.path.isdir(srcDirName):
os.mkdir(srcDirName)
path = os.path.join(srcDirName, 'abc.txt')
if not os.path.exists(path):
with open(path, 'w') as fp:
fp.write('abc')
save(srcDirName)
#@title Restore Directory from Google Drive
import os
def restore(dstDirName):
    if '/' in dstDirName:
print('Use only the name of the dir, not the path to it')
return
if os.path.isdir(dstDirName):
print(dstDirName, 'already exists')
return dstDirName
zipName = dstDirName + '.zip'
gd = GDrive()
zf = gd.download(zipName)
print('zf is', zf)
    if zf is None:
os.mkdir(dstDirName)
return None
return unzip(zf, '.')
#@title Test Restore Directory { run: "auto", vertical-output: true }
dstDirName = "" #@param ["", "datalab", "CNN_Results"]
import shutil
if dstDirName != '':
if os.path.isdir(dstDirName):
print('rmtree', dstDirName)
shutil.rmtree(dstDirName)
print(restore(dstDirName))
| 30.049223 | 512 | 0.691698 | 234 | 0.020174 | 0 | 0 | 0 | 0 | 0 | 0 | 6,024 | 0.519355 |
e9736a918f48d6f382688f91eb8391428a99f968
| 2,893 |
py
|
Python
|
sarpy/io/product/base.py
|
spacefan/sarpy
|
2791af86b568c8a8560275aee426a4718d5a4606
|
[
"MIT"
] | 119 |
2018-07-12T22:08:17.000Z
|
2022-03-24T12:11:39.000Z
|
sarpy/io/product/base.py
|
spacefan/sarpy
|
2791af86b568c8a8560275aee426a4718d5a4606
|
[
"MIT"
] | 72 |
2018-03-29T15:57:37.000Z
|
2022-03-10T01:46:21.000Z
|
sarpy/io/product/base.py
|
spacefan/sarpy
|
2791af86b568c8a8560275aee426a4718d5a4606
|
[
"MIT"
] | 54 |
2018-03-27T19:57:20.000Z
|
2022-03-09T20:53:11.000Z
|
"""
Base common features for product readers
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
from typing import Sequence, List, Tuple, Union
from sarpy.io.general.base import AbstractReader
from sarpy.io.product.sidd1_elements.SIDD import SIDDType as SIDDType1
from sarpy.io.product.sidd2_elements.SIDD import SIDDType as SIDDType2
from sarpy.io.complex.sicd_elements.SICD import SICDType
class SIDDTypeReader(AbstractReader):
def __init__(self, sidd_meta, sicd_meta):
"""
Parameters
----------
sidd_meta : None|SIDDType1|SIDDType2|Sequence[SIDDType1]|Sequence[SIDDType2]
The SIDD metadata object(s), if provided
sicd_meta : None|SICDType|Sequence[SICDType]
the SICD metadata object(s), if provided
"""
if sidd_meta is None:
self._sidd_meta = None
elif isinstance(sidd_meta, (SIDDType1, SIDDType2)):
self._sidd_meta = sidd_meta
else:
            temp_list = []  # type: List[Union[SIDDType1, SIDDType2]]
for el in sidd_meta:
if not isinstance(el, (SIDDType1, SIDDType2)):
raise TypeError(
'Got a collection for sidd_meta, and all elements are required '
'to be instances of SIDDType.')
temp_list.append(el)
self._sidd_meta = tuple(temp_list)
if sicd_meta is None:
self._sicd_meta = None
elif isinstance(sicd_meta, SICDType):
self._sicd_meta = (sicd_meta, )
else:
temp_list = [] # type: List[SICDType]
for el in sicd_meta:
if not isinstance(el, SICDType):
raise TypeError(
'Got a collection for sicd_meta, and all elements are required '
'to be instances of SICDType.')
temp_list.append(el)
self._sicd_meta = tuple(temp_list)
@property
def sidd_meta(self):
# type: () -> Union[None, SIDDType1, SIDDType2, Tuple[SIDDType1], Tuple[SIDDType2]]
"""
None|SIDDType1|SIDDType2|Tuple[SIDDType1]|Tuple[SIDDType2]: the sidd meta_data collection.
"""
return self._sidd_meta
@property
def sicd_meta(self):
# type: () -> Union[None, Tuple[SICDType]]
"""
None|Tuple[SICDType]: the sicd meta_data collection.
"""
return self._sicd_meta
def get_sidds_as_tuple(self):
"""
Get the sidd collection as a tuple - for simplicity and consistency of use.
Returns
-------
        None|Tuple[SIDDType1]|Tuple[SIDDType2]
"""
if self._sidd_meta is None:
return None
elif isinstance(self._sidd_meta, tuple):
return self._sidd_meta
else:
return (self._sidd_meta, )
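# --- Usage sketch (editor's addition; `reader` stands for any concrete
# subclass instance and is an assumption, not a name from this module) ---
#
#     for sidd in reader.get_sidds_as_tuple() or ():
#         print(sidd)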
| 31.445652 | 98 | 0.59281 | 2,472 | 0.854476 | 0 | 0 | 483 | 0.166955 | 0 | 0 | 1,108 | 0.382993 |
e9782a8f7459b65fce4ad645e6b56ab9d0f6103d
| 2,144 |
py
|
Python
|
gladier/base.py
|
globus-labs/gladier_tools
|
0dc4a23af81a2355a908b9a9026f0e68a527c6dc
|
[
"Apache-2.0"
] | 1 |
2020-08-25T20:20:18.000Z
|
2020-08-25T20:20:18.000Z
|
gladier/base.py
|
globus-labs/gladier_tools
|
0dc4a23af81a2355a908b9a9026f0e68a527c6dc
|
[
"Apache-2.0"
] | null | null | null |
gladier/base.py
|
globus-labs/gladier_tools
|
0dc4a23af81a2355a908b9a9026f0e68a527c6dc
|
[
"Apache-2.0"
] | null | null | null |
class GladierBaseTool(object):
"""Gladier Defaults defines a common method of tying together
flows, funcx-functions, and default inputs for starting a flow."""
flow_definition = None
flow_input = dict()
required_input = []
alias_exempt = ['funcx_endpoint_compute', 'funcx_endpoint_non_compute']
funcx_endpoints = dict()
funcx_functions = []
    def __init__(self, alias=None, alias_class=None):
        self.alias = alias
        if alias and not alias_class:
            raise ValueError(
                f'{self.__class__.__name__} given alias "{alias}" but not "alias_class". '
                'ex: alias_class=gladier.utils.tool_alias.StateSuffixVariablePrefix'
            )
        # the renamer is only available when an alias_class is supplied
        self.alias_renamer = alias_class(alias) if alias_class else None
def get_required_input(self):
if self.alias:
required = []
for input_var in self.required_input:
if input_var not in self.alias_exempt:
required.append(self.alias_renamer.rename_variable(input_var, self))
else:
required.append(input_var)
return required
else:
return self.required_input
def get_flow_input(self):
if not self.alias:
return self.flow_input
flow_input = dict()
for input_var, val in self.flow_input.items():
if input_var not in self.alias_exempt:
flow_input[self.alias_renamer.rename_variable(input_var, self)] = val
else:
flow_input[input_var] = val
return flow_input
def get_original_inputs(self):
return [input_var for input_var in set(self.required_input) | set(self.flow_input.keys())
if input_var not in self.alias_exempt]
def rename_state(self, state_name, state_data):
name = self.alias_renamer.rename_state(state_name, self)
data = self.alias_renamer.rename_input_variables(state_data,
self.get_original_inputs(), self)
return name, data
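# --- Usage sketch (editor's addition; every name below is an assumption,
# shown only to illustrate how a tool subclass ties the pieces together) ---
#
#     class MyTool(GladierBaseTool):
#         flow_input = {'funcx_endpoint_compute': '<endpoint-uuid>'}
#         required_input = ['input_path', 'funcx_endpoint_compute']
#
#     tool = MyTool()
#     tool.get_required_input()  # -> ['input_path', 'funcx_endpoint_compute']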
| 37.614035 | 97 | 0.615672 | 2,141 | 0.998601 | 0 | 0 | 0 | 0 | 0 | 0 | 326 | 0.152052 |
e97c7053b712437ddd9adb3801c6bf654177920e
| 2,717 |
py
|
Python
|
PersonManage/role/views.py
|
ahriknow/ahriknow
|
817b5670c964e01ffe19ed182ce0a7b42e17ce09
|
[
"MIT"
] | null | null | null |
PersonManage/role/views.py
|
ahriknow/ahriknow
|
817b5670c964e01ffe19ed182ce0a7b42e17ce09
|
[
"MIT"
] | 3 |
2021-03-19T01:28:43.000Z
|
2021-04-08T19:57:19.000Z
|
PersonManage/role/views.py
|
ahriknow/ahriknow
|
817b5670c964e01ffe19ed182ce0a7b42e17ce09
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from redis import StrictRedis
from rest_framework.response import Response
from rest_framework.views import APIView
from PersonManage.role.models import Role
from PersonManage.role.serializer import OneRole, ManyRole
from PersonManage.jurisdiction.models import Jurisdiction
class RoleView(APIView):
def get(self, request, id=None):
if id:
if role := Role.objects.filter(pk=id).first():
data = OneRole(instance=role, many=False).data
return Response({'code': 200, 'msg': 'Query was successful!', 'data': data})
return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})
else:
roles = Role.objects.all()
data = ManyRole(instance=roles, many=True).data
return Response({'code': 200, 'msg': 'Query was successful!', 'data': data})
def post(self, request):
try:
role = Role(name=request.data['name'], describe=request.data['describe'])
role.save()
return Response({'code': 200, 'msg': 'Create successful!', 'data': None})
except Exception as ex:
if 'UNIQUE' in str(ex):
return Response({'code': 400, 'msg': 'Data duplication!', 'data': None})
return Response({'code': 500, 'msg': str(ex), 'data': None})
def put(self, request, id=None):
if role := Role.objects.filter(pk=id).first():
data = request.data
if name := data.get('name'):
role.name = name
if describe := data.get('describe'):
role.describe = describe
if 'jurisdictions' in data:
                # wipe the redis cache database before reassigning jurisdictions
                redis = StrictRedis(host=settings.DATABASES['redis']['HOST'],
port=settings.DATABASES['redis']['PORT'],
db=settings.DATABASES['redis']['NAME_2'],
password=settings.DATABASES['redis']['PASS'])
redis.flushdb()
role.jurisdictions.clear()
for i in data['jurisdictions']:
jur = Jurisdiction.objects.filter(pk=i).first()
role.jurisdictions.add(jur)
role.save()
return Response({'code': 200, 'msg': 'Update successful!', 'data': None})
return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})
def delete(self, request, id=None):
if role := Role.objects.filter(pk=id).first():
role.delete()
return Response({'code': 200, 'msg': 'Delete successful!'})
return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})
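# --- Usage sketch (editor's addition; the URL patterns below are assumptions,
# not routes from this repository) ---
#
#     from django.urls import path
#     urlpatterns = [
#         path('roles/', RoleView.as_view()),
#         path('roles/<int:id>/', RoleView.as_view()),
#     ]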
| 46.844828 | 92 | 0.560177 | 2,406 | 0.885536 | 0 | 0 | 0 | 0 | 0 | 0 | 479 | 0.176297 |
e97d491587ef3bda7620cb34a61d716763821b01
| 5,288 |
py
|
Python
|
datalad_osf/utils.py
|
adswa/datalad-osf-2
|
25988f898ffc6f489c0855933136f39f79cf8c65
|
[
"BSD-3-Clause"
] | null | null | null |
datalad_osf/utils.py
|
adswa/datalad-osf-2
|
25988f898ffc6f489c0855933136f39f79cf8c65
|
[
"BSD-3-Clause"
] | null | null | null |
datalad_osf/utils.py
|
adswa/datalad-osf-2
|
25988f898ffc6f489c0855933136f39f79cf8c65
|
[
"BSD-3-Clause"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See LICENSE file distributed along with the datalad_osf package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import json
from os import environ
from datalad.downloaders.credentials import (
Token,
UserPassword,
)
from datalad import ui
# Note: This should ultimately go into osfclient
def create_node(osf_session, title, category="data", tags=None,
public=False, parent=None, description=None):
""" Create a node on OSF
Parameters
----------
title: str
Title of the node
category: str
categorization changes how the node is displayed
on OSF, but doesn't appear to have a "real" function
tags: list of str
public: bool
whether to make the new node public
parent: str, optional
ID of an OSF parent node to create a child node for
Returns
-------
str
ID of the created node
"""
if parent:
# we have a parent, use its URL to create children
url = osf_session.build_url('nodes', parent, 'children')
else:
url = osf_session.build_url('nodes')
post_data = {"data":
{"type": "nodes",
"attributes":
{"title": title,
"category": category,
"public": public,
}
}
}
if tags:
post_data["data"]["attributes"]["tags"] = tags
if description:
post_data["data"]["attributes"]["description"] = description
response = osf_session.post(url, data=json.dumps(post_data))
# TODO: figure what errors to better deal with /
# create a better message from
response.raise_for_status()
# TODO: This should eventually return an `node` instance (see osfclient).
# Response contains all properties of the created node.
node_id = response.json()['data']['id']
# Note: Going for "html" URL here for reporting back to the user, since this
# what they would need to go to in order to proceed manually.
# There's also the flavor "self" instead, which is the node's
# API endpoint.
proj_url = response.json()["data"]["links"]["html"]
return node_id, proj_url
def delete_node(osf_session, id_):
""" Delete a node on OSF
Parameters
----------
id_: str
to be deleted node ID
"""
url = osf_session.build_url('nodes', id_)
response = osf_session.delete(url)
response.raise_for_status()
def initialize_osf_remote(remote, node,
encryption="none", autoenable="true"):
"""Initialize special remote with a given node
convenience wrapper for git-annex-initremote w/o datalad
Parameters
----------
remote: str
name for the special remote
node: str
ID of the node/component to use
encryption: str
see git-annex-initremote; mandatory option;
autoenable: str
'true' or 'false'; tells git-annex to automatically enable the
        special remote on git-annex-init (particularly after a fresh git-clone)
"""
init_opts = ["type=external",
"externaltype=osf",
"encryption={}".format(encryption),
"autoenable={}".format(autoenable),
"node={}".format(node)]
import subprocess
subprocess.run(["git", "annex", "initremote", remote] + init_opts)
def get_credentials(allow_interactive=True):
# prefer the environment
if 'OSF_TOKEN' in environ or all(
k in environ for k in ('OSF_USERNAME', 'OSF_PASSWORD')):
return dict(
token=environ.get('OSF_TOKEN', None),
username=environ.get('OSF_USERNAME', None),
password=environ.get('OSF_PASSWORD', None),
)
# fall back on DataLad credential manager
token_auth = Token(
name='https://osf.io',
url='https://osf.io/settings/tokens',
)
up_auth = UserPassword(
name='https://osf.io',
url='https://osf.io/settings/account',
)
do_interactive = allow_interactive and ui.is_interactive()
# get auth token, from environment, or from datalad credential store
# if known-- we do not support first-time entry during a test run
token = environ.get(
'OSF_TOKEN',
token_auth().get('token', None)
if do_interactive or token_auth.is_known
else None)
username = None
password = None
if not token:
# now same for user/password if there was no token
username = environ.get(
'OSF_USERNAME',
up_auth().get('user', None)
if do_interactive or up_auth.is_known
else None)
password = environ.get(
'OSF_PASSWORD',
up_auth().get('password', None)
if do_interactive or up_auth.is_known
else None)
return dict(token=token, username=username, password=password)
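# Usage sketch: a minimal illustration of combining the helpers above. It
# assumes an authenticated osfclient session (`osf_session`) and an existing
# git-annex repository; both are placeholders, not provided by this module.
def _demo_create_and_register(osf_session):
    creds = get_credentials(allow_interactive=False)
    node_id, node_url = create_node(
        osf_session, title='datalad-demo', tags=['datalad'], public=False)
    # register the fresh node as a git-annex special remote named "osf"
    initialize_osf_remote('osf', node_id)
    return creds, node_id, node_url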
| 31.664671 | 87 | 0.580182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,776 | 0.524962 |
e97dc3bd342d59f1490983b6c64ea74961cdd4e4
| 1,487 |
py
|
Python
|
tpDcc/libs/qt/core/observable.py
|
tpDcc/tpQtLib
|
26b6e893395633a1b189a1b73654891b7688648d
|
[
"MIT"
] | 3 |
2019-08-26T05:56:12.000Z
|
2019-10-03T11:35:53.000Z
|
tpDcc/libs/qt/core/observable.py
|
tpDcc/tpQtLib
|
26b6e893395633a1b189a1b73654891b7688648d
|
[
"MIT"
] | null | null | null |
tpDcc/libs/qt/core/observable.py
|
tpDcc/tpQtLib
|
26b6e893395633a1b189a1b73654891b7688648d
|
[
"MIT"
] | 1 |
2021-03-03T21:01:50.000Z
|
2021-03-03T21:01:50.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains Qt observer pattern related functions and classes
"""
from __future__ import print_function, division, absolute_import
from uuid import uuid4
from functools import partial
from Qt.QtCore import Signal, QObject
class ObservableProxy(QObject):
"""
Observer class that allows us to invoke callbacks in UI threads from non UI threads.
"""
observerSignal = Signal(str, object)
def __init__(self):
super(ObservableProxy, self).__init__()
self._mapping = dict()
self.observerSignal.connect(self._on_call)
# =================================================================================================================
# BASE
# =================================================================================================================
def add_mapping(self, callback):
callback_uuid = str(uuid4())
proxy_callback = partial(self.observerSignal.emit, callback_uuid)
self._mapping[callback_uuid] = callback
return proxy_callback
# =================================================================================================================
# CALLBACKS
# =================================================================================================================
def _on_call(self, uuid, *args, **kwargs):
if uuid in self._mapping:
            self._mapping[uuid](*args, **kwargs)
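# Usage sketch: a minimal illustration, assuming `safe_callback` is handed to
# a worker thread; the names below are placeholders. Calling the proxied
# callback emits observerSignal (which carries one object payload), so Qt
# delivers `on_done` in the thread that owns the ObservableProxy (the UI one).
def _demo_observable_proxy():
    proxy = ObservableProxy()
    def on_done(result):
        print('delivered in the UI thread:', result)
    safe_callback = proxy.add_mapping(on_done)
    safe_callback(42)  # safe to call from a non-UI thread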
| 31.638298 | 119 | 0.462677 | 1,199 | 0.806321 | 0 | 0 | 0 | 0 | 0 | 0 | 699 | 0.470074 |
e98066a2b0d3ed3bbd8dc11131cf9f11efdf134a
| 3,645 |
py
|
Python
|
advent-of-code-2019/day 12/main.py
|
gikf/advent-of-code
|
923b026ce87121b73093554734746c2ecb17c5e2
|
[
"MIT"
] | null | null | null |
advent-of-code-2019/day 12/main.py
|
gikf/advent-of-code
|
923b026ce87121b73093554734746c2ecb17c5e2
|
[
"MIT"
] | null | null | null |
advent-of-code-2019/day 12/main.py
|
gikf/advent-of-code
|
923b026ce87121b73093554734746c2ecb17c5e2
|
[
"MIT"
] | null | null | null |
"""Advent of Code 2019 Day 12."""
from functools import lru_cache
import re
def main(file_input='input.txt'):
lines = [line.strip() for line in get_file_contents(file_input)]
moons = parse_moons(lines)
after_steps = simulate_steps(moons, 1000)
total_energy = find_total_energy(after_steps)
print(f'Total energy after 1000 steps: {total_energy}')
cycles = simulate_steps(moons)
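    # the three axes evolve independently, so the whole system first repeats
    # at the lcm of the per-axis cycle lengths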
*two_cycles, last_cycle = cycles.values()
    steps_to_repeat = lcm(lcm(*two_cycles), last_cycle)
print(f'Steps to reach first repeating state: {steps_to_repeat}')
def simulate_steps(moons, steps=None):
"""Simulate number steps of moons.
Returns moons after number of steps.
If steps is None returns cycles of moons."""
cycles = {}
initial_moons = moons
step = 0
while not steps or step < steps:
step += 1
moons = moon_motion(moons)
if steps:
continue
for axis in range(3):
if axis in cycles:
continue
if is_cycle(moons, initial_moons, axis):
cycles[axis] = step
if len(cycles) == 3:
return cycles
return moons
def is_cycle(moons, initial, axis):
"""Check if moons cycled at the axis to the initial values."""
for moon, initial in zip(moons, initial):
if (moon['position'][axis] != initial['position'][axis]
or moon['velocity'][axis] != initial['velocity'][axis]):
return False
return True
def moon_motion(initial_moons):
"""Move moons by one step."""
moons = []
for moon in initial_moons:
cur_velocity = moon['velocity']
for other_moon in initial_moons:
if moon == other_moon:
continue
velocity_change = join_with_function(
gravity_effect, moon['position'], other_moon['position'])
cur_velocity = join_with_function(
int.__add__, cur_velocity, velocity_change)
new_position = join_with_function(
int.__add__, moon['position'], cur_velocity)
moons.append({
'position': new_position,
'velocity': cur_velocity,
})
return moons
def join_with_function(func, values1, values2):
"""Join values using func function."""
return [
func(value1, value2)
for value1, value2 in zip(values1, values2)
]
def gravity_effect(position, other_position):
"""Return effect other_position has on position."""
if position == other_position:
return 0
elif position > other_position:
return -1
return 1
def find_total_energy(moons):
"""Get total energy from moons."""
return sum(get_energy(moon['position']) * get_energy(moon['velocity'])
for moon in moons)
def get_energy(values):
"""Get energy from values."""
return sum(abs(value) for value in values)
def parse_moons(lines):
"""Parse lines to dictionary with positions and velocity."""
moons = []
regex = r'([-\d]+)'
for line in lines:
position = [int(num) for num in re.findall(regex, line)]
moons.append({
'position': position,
'velocity': [0, 0, 0]
})
return moons
@lru_cache()
def lcm(a, b):
"""Least common multiple."""
    return abs(a * b) // gcd(a, b)  # integer division avoids float precision loss
@lru_cache()
def gcd(a, b):
"""Greatest common divisor."""
if b == 0:
return a
return gcd(b, a % b)
def get_file_contents(file):
"""Read all lines from file."""
with open(file) as f:
return f.readlines()
if __name__ == '__main__':
main()
| 27.201493 | 74 | 0.608505 | 0 | 0 | 0 | 0 | 213 | 0.058436 | 0 | 0 | 828 | 0.22716 |
e980cd0e0ae302b2d5e582e27e0280d700f45285
| 1,909 |
py
|
Python
|
rest_framework_json_api/utils.py
|
jwhitlock/drf-json-api
|
a62802432c612c34079f3c3694129f37778e2577
|
[
"MIT"
] | null | null | null |
rest_framework_json_api/utils.py
|
jwhitlock/drf-json-api
|
a62802432c612c34079f3c3694129f37778e2577
|
[
"MIT"
] | null | null | null |
rest_framework_json_api/utils.py
|
jwhitlock/drf-json-api
|
a62802432c612c34079f3c3694129f37778e2577
|
[
"MIT"
] | null | null | null |
from django.utils.encoding import force_text
from django.utils.text import slugify
try:
from rest_framework.serializers import ManyRelatedField
except ImportError:
ManyRelatedField = type(None)
try:
from rest_framework.serializers import ListSerializer
except ImportError:
ListSerializer = type(None)
def get_related_field(field):
if isinstance(field, ManyRelatedField):
return field.child_relation
if isinstance(field, ListSerializer):
return field.child
return field
def is_related_many(field):
if hasattr(field, "many"):
return field.many
if isinstance(field, ManyRelatedField):
return True
if isinstance(field, ListSerializer):
return True
return False
def model_from_obj(obj):
model = getattr(obj, "model", None)
if model is not None:
return model
queryset = getattr(obj, "queryset", None)
if queryset is not None:
return queryset.model
return None
def model_to_resource_type(model):
    '''Return the verbose plural form of a model name
    Examples:
        Person -> "people"
        ProfileImage -> "profile images"
    '''
if model is None:
return "data"
return force_text(model._meta.verbose_name_plural)
#
# String conversion
#
def camelcase(string):
'''Return a string in lowerCamelCase
Examples:
"people" -> "people"
"profile images" -> "profileImages"
'''
out = slug(string).replace('-', ' ').title().replace(' ', '')
return out[0].lower() + out[1:]
def slug(string):
'''Return a string where words are connected with hyphens'''
return slugify(force_text(string))
def snakecase(string):
'''Return a string where words are connected with underscores
Examples:
"people" -> "people"
"profile images" -> "profile_images"
'''
return slug(string).replace('-', '_')
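# Quick sanity check of the string helpers above; mirrors the docstring
# examples (illustrative only, needs Django on the path)
if __name__ == "__main__":
    assert slug("profile images") == "profile-images"
    assert camelcase("profile images") == "profileImages"
    assert snakecase("profile images") == "profile_images"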
| 20.526882 | 71 | 0.671032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 550 | 0.288109 |
e982548723b8fb19b5a93e5e600f9ad6d5133e1c
| 2,246 |
py
|
Python
|
Ui_ZhkuMainWindow.py
|
yujiecong/PyQt-Zhku-Client
|
8fa35592cbf8af7efe8d55d4f66625cd4918a3ff
|
[
"MIT"
] | null | null | null |
Ui_ZhkuMainWindow.py
|
yujiecong/PyQt-Zhku-Client
|
8fa35592cbf8af7efe8d55d4f66625cd4918a3ff
|
[
"MIT"
] | null | null | null |
Ui_ZhkuMainWindow.py
|
yujiecong/PyQt-Zhku-Client
|
8fa35592cbf8af7efe8d55d4f66625cd4918a3ff
|
[
"MIT"
] | 1 |
2021-09-14T03:28:16.000Z
|
2021-09-14T03:28:16.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Ui_ZhkuMainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ZhkuMainWindow(object):
def setupUi(self, ZhkuMainWindow):
ZhkuMainWindow.setObjectName("ZhkuMainWindow")
ZhkuMainWindow.resize(1007, 543)
ZhkuMainWindow.setStyleSheet("QMainWindow#ZhkuMainWindow{\n"
"border-image:url(:/img/img/ece414499b12f26fc1cdc8ccd7e019ea.jpg)}")
self.centralwidget = QtWidgets.QWidget(ZhkuMainWindow)
self.centralwidget.setStyleSheet("")
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setObjectName("widget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.horizontalLayout_2.addWidget(self.widget)
self.widget_2 = QtWidgets.QWidget(self.centralwidget)
self.widget_2.setObjectName("widget_2")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widget_2)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.horizontalLayout_2.addWidget(self.widget_2)
ZhkuMainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(ZhkuMainWindow)
QtCore.QMetaObject.connectSlotsByName(ZhkuMainWindow)
def retranslateUi(self, ZhkuMainWindow):
_translate = QtCore.QCoreApplication.translate
ZhkuMainWindow.setWindowTitle(_translate("ZhkuMainWindow", "MainWindow"))
import qr_img_rc
| 44.92 | 81 | 0.740427 | 1,883 | 0.838379 | 0 | 0 | 0 | 0 | 0 | 0 | 525 | 0.233749 |
e9869e465ad91d2e5ca0674a3741999310e41b5c
| 95 |
py
|
Python
|
calheatmap/apps.py
|
acdh-oeaw/gtrans
|
6f56b1d09de0cad503273bf8a01cd81e25220524
|
[
"MIT"
] | 1 |
2020-03-15T16:14:02.000Z
|
2020-03-15T16:14:02.000Z
|
calheatmap/apps.py
|
acdh-oeaw/gtrans
|
6f56b1d09de0cad503273bf8a01cd81e25220524
|
[
"MIT"
] | 14 |
2018-11-09T08:34:23.000Z
|
2022-02-10T08:15:53.000Z
|
calheatmap/apps.py
|
acdh-oeaw/gtrans
|
6f56b1d09de0cad503273bf8a01cd81e25220524
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class CalheatmapConfig(AppConfig):
name = 'calheatmap'
| 15.833333 | 34 | 0.768421 | 58 | 0.610526 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.126316 |
e987a8021b1287256296f2282748c6e9f81dfd63
| 767 |
py
|
Python
|
ntcir15_tools/eval/__init__.py
|
longpham28/ntcir15_tools
|
d5fd138a3c90dfd2c5a67ea908101fed5563484d
|
[
"MIT"
] | null | null | null |
ntcir15_tools/eval/__init__.py
|
longpham28/ntcir15_tools
|
d5fd138a3c90dfd2c5a67ea908101fed5563484d
|
[
"MIT"
] | null | null | null |
ntcir15_tools/eval/__init__.py
|
longpham28/ntcir15_tools
|
d5fd138a3c90dfd2c5a67ea908101fed5563484d
|
[
"MIT"
] | null | null | null |
import numpy as np
from pyNTCIREVAL import Labeler
from pyNTCIREVAL.metrics import MSnDCG
from collections import defaultdict
from ntcir15_tools.data import en_query_ids, ja_query_ids, en_labels, ja_labels
def get_rel_level(text):
if text == "L0":
return 0
if text == "L1":
return 1
if text == "L2":
return 2
return 0
def get_qrels(query_id):
lang = query_id.split("-")[1]
assert query_id in en_query_ids or query_id in ja_query_ids, "not valid query_id"
if lang == "E":
labels = en_labels
else:
labels = ja_labels
temp = labels[labels[:, 0] == query_id]
temp = temp[:, 1:]
result = {}
for col_id, text in temp:
result[col_id] = get_rel_level(text)
return result
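# Minimal illustration of the relevance-level mapping above: any label other
# than "L1"/"L2" (including unknown strings) falls back to level 0
if __name__ == "__main__":
    assert [get_rel_level(t) for t in ("L0", "L1", "L2", "L9")] == [0, 1, 2, 0]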
| 24.741935 | 85 | 0.647979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.049544 |
e987c807f21477bc86678b22246d01c6112ae5c0
| 50 |
py
|
Python
|
classification/cifar10/losses/__init__.py
|
AkibMashrur/Ensembling
|
bdf2f601be90070fed10db62a9c15506e1df37b6
|
[
"Apache-2.0"
] | null | null | null |
classification/cifar10/losses/__init__.py
|
AkibMashrur/Ensembling
|
bdf2f601be90070fed10db62a9c15506e1df37b6
|
[
"Apache-2.0"
] | null | null | null |
classification/cifar10/losses/__init__.py
|
AkibMashrur/Ensembling
|
bdf2f601be90070fed10db62a9c15506e1df37b6
|
[
"Apache-2.0"
] | null | null | null |
from .contrastive import SupConLoss, NoiseConLoss
| 25 | 49 | 0.86 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e988aca86693a630d0af6b4768506c2e555391e5
| 71 |
py
|
Python
|
Atividade do Livro-Nilo Ney(PYTHON)/Cap.03/exe 3.13.py
|
EduardoJonathan0/Python
|
0e4dff4703515a6454ba25c6f401960b6155f32f
|
[
"MIT"
] | null | null | null |
Atividade do Livro-Nilo Ney(PYTHON)/Cap.03/exe 3.13.py
|
EduardoJonathan0/Python
|
0e4dff4703515a6454ba25c6f401960b6155f32f
|
[
"MIT"
] | null | null | null |
Atividade do Livro-Nilo Ney(PYTHON)/Cap.03/exe 3.13.py
|
EduardoJonathan0/Python
|
0e4dff4703515a6454ba25c6f401960b6155f32f
|
[
"MIT"
] | null | null | null |
C = int(input("Insira um valor: "))
Fire = (9 * C / 5) + 32
print(Fire)
| 23.666667 | 35 | 0.56338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.267606 |
e9895372814e45f43f516d5ef779aac132b10fc9
| 2,145 |
py
|
Python
|
notebooks/Detecting Covid-19 through Transfer Learning/src/test.py
|
supria68/Data-Science-Projects
|
423695c130a92db1a188b3d3a13871f0f76f6f5b
|
[
"MIT"
] | 2 |
2020-09-16T19:37:30.000Z
|
2021-11-01T17:49:36.000Z
|
notebooks/Detecting Covid-19 through Transfer Learning/src/test.py
|
supria68/Data-Science-Projects
|
423695c130a92db1a188b3d3a13871f0f76f6f5b
|
[
"MIT"
] | null | null | null |
notebooks/Detecting Covid-19 through Transfer Learning/src/test.py
|
supria68/Data-Science-Projects
|
423695c130a92db1a188b3d3a13871f0f76f6f5b
|
[
"MIT"
] | 1 |
2021-11-01T17:49:37.000Z
|
2021-11-01T17:49:37.000Z
|
"""
filename: test.py
author: Supriya Sudarshan
version: 19.04.2021
description: Takes in the images and predicts (Covid or Non-Covid/Normal) using the *.h5 models
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg19 import preprocess_input
import random
def evaluate(img_path, model):
"""
Given the image path and model, preprocess the input image and get
predictions
"""
img = image.load_img(img_path, target_size=(224,224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
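    # vgg19.preprocess_input applies the ImageNet normalization the VGG backbone expects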
image_data = preprocess_input(x)
y_pred = model.predict(image_data)
probability = y_pred[0]
if probability[0] > 0.5:
prediction = str('%.2f' % (probability[0]*100) + '% COVID')
else:
prediction = str('%.2f' % ((1-probability[0])*100) + '% Normal')
plt.title(prediction)
plt.imshow(img)
plt.show()
if __name__ == "__main__":
# Load appropriate models
ct_model = load_model('../saved_models/chest_ct_vggmodel.h5')
xray_model = load_model('../saved_models/chest_xray_vggmodel.h5')
ultrasound_model = load_model('../saved_models/ultrasound_vggmodel.h5')
##### Predictions CT
path = '../images_for_testing/CT'
img = random.choice([x for x in os.listdir(path) if os.path.isfile(os.path.join(path, x))])
print('\nPreparing to predict for a CT image: {}'.format(img))
evaluate(path + '/'+ img, ct_model)
##### Predictions Xray
path = '../images_for_testing/Xray'
img = random.choice([x for x in os.listdir(path) if os.path.isfile(os.path.join(path, x))])
print('\nPreparing to predict for a Xray image: {}'.format(img))
evaluate(path + '/'+ img, xray_model)
##### Predictions Ultrasound
path = '../images_for_testing/Ultrasound'
img = random.choice([x for x in os.listdir(path) if os.path.isfile(os.path.join(path, x))])
print('\nPreparing to predict for a ultrasound image: {}'.format(img))
evaluate(path + '/'+ img, ultrasound_model)
| 32.014925 | 97 | 0.674592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 757 | 0.352914 |
e98a1dc0d5d9161eac10445f95ac9ce1dbe57950
| 348 |
py
|
Python
|
projecteuler/problems/problem_41.py
|
hjheath/ProjectEuler
|
6961fe81e2039c281ea9d4ab0bdd85611bf256a8
|
[
"MIT"
] | 1 |
2015-04-25T10:37:52.000Z
|
2015-04-25T10:37:52.000Z
|
projecteuler/problems/problem_41.py
|
hjheath/ProjectEuler
|
6961fe81e2039c281ea9d4ab0bdd85611bf256a8
|
[
"MIT"
] | null | null | null |
projecteuler/problems/problem_41.py
|
hjheath/ProjectEuler
|
6961fe81e2039c281ea9d4ab0bdd85611bf256a8
|
[
"MIT"
] | null | null | null |
"""Problem 41 of https://projecteuler.net"""
from itertools import permutations
from projecteuler.inspectors import is_prime
def problem_41():
"""Solution to problem 41."""
    # All 8- and 9-digit pandigitals are divisible by 3 (digit sums 36 and 45), so they cannot be prime.
perms = [int(''.join(x)) for x in permutations('1234567')]
return max(x for x in perms if is_prime(x))
| 26.769231 | 62 | 0.698276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.387931 |
e98cb6485313bf23d0ef3116dfc0e309cd633aad
| 3,064 |
py
|
Python
|
preprocess/utils.py
|
federicozaiter/LogClass
|
62c1c9c61294625bdb3d99dc01b6adc7b735c4ab
|
[
"MIT"
] | 159 |
2020-02-19T00:19:23.000Z
|
2022-03-30T08:40:08.000Z
|
preprocess/utils.py
|
WeibinMeng/LogClass-1
|
8edbaf4377374e2aac5e7057987e1d047b83ff2f
|
[
"MIT"
] | 3 |
2021-06-09T04:30:35.000Z
|
2022-01-09T23:26:07.000Z
|
preprocess/utils.py
|
WeibinMeng/LogClass-1
|
8edbaf4377374e2aac5e7057987e1d047b83ff2f
|
[
"MIT"
] | 41 |
2020-02-19T00:19:26.000Z
|
2022-03-28T08:02:22.000Z
|
import re
import numpy as np
from tqdm import tqdm
from ..decorators import print_step
from multiprocessing import Pool
# Compiling for optimization
re_sub_1 = re.compile(r"(:(?=\s))|((?<=\s):)")
re_sub_2 = re.compile(r"(\d+\.)+\d+")
re_sub_3 = re.compile(r"\d{2}:\d{2}:\d{2}")
re_sub_4 = re.compile(r"Mar|Apr|Dec|Jan|Feb|Nov|Oct|May|Jun|Jul|Aug|Sep")
re_sub_5 = re.compile(r":?(\w+:)+")
re_sub_6 = re.compile(r"\.|\(|\)|\<|\>|\/|\-|\=|\[|\]")
p = re.compile(r"[^(A-Za-z)]")
def remove_parameters(msg):
# Removing parameters with Regex
msg = re.sub(re_sub_1, "", msg)
msg = re.sub(re_sub_2, "", msg)
msg = re.sub(re_sub_3, "", msg)
msg = re.sub(re_sub_4, "", msg)
msg = re.sub(re_sub_5, "", msg)
msg = re.sub(re_sub_6, " ", msg)
L = msg.split()
# Filtering strings that have non-letter tokens
new_msg = [k for k in L if not p.search(k)]
msg = " ".join(new_msg)
return msg
def remove_parameters_slower(msg):
# Removing parameters with Regex
msg = re.sub(r"(:(?=\s))|((?<=\s):)", "", msg)
msg = re.sub(r"(\d+\.)+\d+", "", msg)
msg = re.sub(r"\d{2}:\d{2}:\d{2}", "", msg)
msg = re.sub(r"Mar|Apr|Dec|Jan|Feb|Nov|Oct|May|Jun|Jul|Aug|Sep", "", msg)
msg = re.sub(r":?(\w+:)+", "", msg)
msg = re.sub(r"\.|\(|\)|\<|\>|\/|\-|\=|\[|\]", " ", msg)
L = msg.split()
p = re.compile("[^(A-Za-z)]")
# Filtering strings that have non-letter tokens
new_msg = [k for k in L if not p.search(k)]
msg = " ".join(new_msg)
return msg
@print_step
def process_logs(input_source, output, process_line=None):
with open(output, "w", encoding='latin-1') as f:
# counting first to show progress with tqdm
with open(input_source, 'r', encoding='latin-1') as IN:
line_count = sum(1 for line in IN)
with open(input_source, 'r', encoding='latin-1') as IN:
with Pool() as pool:
results = pool.imap(process_line, IN, chunksize=10000)
f.writelines(tqdm(results, total=line_count))
@print_step
def load_logs(params, ignore_unlabeled=False):
log_path = params['logs']
unlabel_label = params['healthy_label']
x_data = []
y_data = []
label_dict = {}
target_names = []
with open(log_path, 'r', encoding='latin-1') as IN:
line_count = sum(1 for line in IN)
with open(log_path, 'r', encoding='latin-1') as IN:
for line in tqdm(IN, total=line_count):
L = line.strip().split()
label = L[0]
            if label not in label_dict:
                if label == unlabel_label:
                    if ignore_unlabeled:
                        continue
                    label_dict[label] = -1.0
                else:
                    label_dict[label] = len(label_dict)
                    target_names.append(label)
x_data.append(" ".join(L[1:]))
y_data.append(label_dict[label])
x_data = np.array(x_data)
y_data = np.array(y_data)
return x_data, y_data, target_names
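def _demo_remove_parameters():
    """Illustrative check with an assumed example line: tokens containing
    digits or punctuation are stripped, plain words are kept."""
    line = "Mar 10 12:34:56 kernel: error at 10.0.0.1 restarting service"
    assert remove_parameters(line) == "kernel error at restarting service"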
| 35.627907 | 77 | 0.568864 | 0 | 0 | 0 | 0 | 1,545 | 0.504243 | 0 | 0 | 674 | 0.219974 |
e98ead08452c6bd2e01e97b70008a25d1afdf8fe
| 4,494 |
py
|
Python
|
examples/FasterRCNN/dataset/data_configs_dict.py
|
ruodingt/tensorpack
|
026006457f3ecdedf23d1bb57c8610591d936b3e
|
[
"Apache-2.0"
] | null | null | null |
examples/FasterRCNN/dataset/data_configs_dict.py
|
ruodingt/tensorpack
|
026006457f3ecdedf23d1bb57c8610591d936b3e
|
[
"Apache-2.0"
] | null | null | null |
examples/FasterRCNN/dataset/data_configs_dict.py
|
ruodingt/tensorpack
|
026006457f3ecdedf23d1bb57c8610591d936b3e
|
[
"Apache-2.0"
] | null | null | null |
import os
from dataset.data_config import DataConfig
images_data_base_dir = os.path.abspath('../../../data/datasets_coco/')
data_conf = {
DataConfig.IMAGE_BASEDIR: images_data_base_dir,
DataConfig.TRAIN: [
{
DataConfig.NICKNAME: 'decay_train',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/web_decay_600-5.json')
}
]
,
DataConfig.EVAL: [
{
DataConfig.NICKNAME: 'decay_eval',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/legacy_decay-3.json')
}
]
}
# images_data_base_dir = os.path.abspath('../../../data/datasets_coco/')
data_conf_tooth_only = {
DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
DataConfig.TRAIN: [
{
DataConfig.NICKNAME: 'decay_train',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/web_decay_600-6-tooth.json')
}
]
,
DataConfig.EVAL: [
{
DataConfig.NICKNAME: 'decay_eval',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/legacy_decay-7-tooth.json') #
}
]
}
data_conf_tooth_legacy_of = {
DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
DataConfig.TRAIN: [
{
DataConfig.NICKNAME: 'decay_train',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/legacy_decay-7-tooth.json')
}
]
,
DataConfig.EVAL: [
{
DataConfig.NICKNAME: 'decay_eval',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/legacy_decay-7-tooth.json') #
}
]
}
data_conf_tooth_web_of = {
DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
DataConfig.TRAIN: [
{
DataConfig.NICKNAME: 'decay_train',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/web_decay_600-6-tooth.json')
}
]
,
DataConfig.EVAL: [
{
DataConfig.NICKNAME: 'decay_eval',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/web_decay_600-6-tooth.json') #
}
]
}
data_conf_lesion_only = {
DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
DataConfig.TRAIN: [
{
DataConfig.NICKNAME: 'decay_train',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/web_decay_600-9-lesion.json')
}
]
,
DataConfig.EVAL: [
{
DataConfig.NICKNAME: 'decay_eval',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/legacy_decay-8-lesion.json') #
}
]
}
data_conf_gingivitis_only = {
DataConfig.IMAGE_BASEDIR: os.path.abspath('../../../data/datasets_coco/'),
DataConfig.TRAIN: [
{
DataConfig.NICKNAME: 'decay_train',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/gingivitis_web_490-13-ging.json')
}
]
,
DataConfig.EVAL: [
{
DataConfig.NICKNAME: 'decay_eval',
DataConfig.ANN_PATH: os.path.join(os.path.abspath('../../../data/'),
'coco_stack_out/legacy_decay-14-ging.json') #
}
]
}
| 36.536585 | 99 | 0.459947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,104 | 0.245661 |
e98ee77c65cf6881d1b3b3557c92ca630d8803bb
| 2,905 |
py
|
Python
|
beaconsite/tests/test_permissions.py
|
brand-fabian/varfish-server
|
6a084d891d676ff29355e72a29d4f7b207220283
|
[
"MIT"
] | 14 |
2019-09-30T12:44:17.000Z
|
2022-02-04T14:45:16.000Z
|
beaconsite/tests/test_permissions.py
|
brand-fabian/varfish-server
|
6a084d891d676ff29355e72a29d4f7b207220283
|
[
"MIT"
] | 244 |
2021-03-26T15:13:15.000Z
|
2022-03-31T15:48:04.000Z
|
beaconsite/tests/test_permissions.py
|
brand-fabian/varfish-server
|
6a084d891d676ff29355e72a29d4f7b207220283
|
[
"MIT"
] | 8 |
2020-05-19T21:55:13.000Z
|
2022-03-31T07:02:58.000Z
|
from django.urls import reverse
from projectroles.tests.test_permissions import TestProjectPermissionBase
from beaconsite.tests.factories import ConsortiumFactory, SiteFactory
class UsersMixin:
def setUp(self):
super().setUp()
self.consortium = ConsortiumFactory()
self.site = SiteFactory()
self.good_users = [
self.superuser,
]
self.bad_users = [
self.anonymous,
self.user_no_roles,
self.owner_as_cat.user,
self.owner_as.user,
self.delegate_as.user,
self.contributor_as.user,
self.guest_as.user,
]
class TestIndexView(UsersMixin, TestProjectPermissionBase):
def test_index(self):
url = reverse("beaconsite:index")
self.assert_response(url, self.good_users, 200)
self.assert_response(url, self.bad_users, 302)
class TestConsortiumViews(UsersMixin, TestProjectPermissionBase):
def test_list(self):
url = reverse("beaconsite:consortium-list")
self.assert_response(url, self.good_users, 200)
self.assert_response(url, self.bad_users, 302)
def test_detail(self):
url = reverse(
"beaconsite:consortium-detail", kwargs={"consortium": str(self.consortium.sodar_uuid)}
)
self.assert_response(url, self.good_users, 200)
self.assert_response(url, self.bad_users, 302)
def test_update(self):
url = reverse(
"beaconsite:consortium-update", kwargs={"consortium": str(self.consortium.sodar_uuid)}
)
self.assert_response(url, self.good_users, 200)
self.assert_response(url, self.bad_users, 302)
def test_delete(self):
url = reverse(
"beaconsite:consortium-delete", kwargs={"consortium": str(self.consortium.sodar_uuid)}
)
self.assert_response(url, self.good_users, 200)
self.assert_response(url, self.bad_users, 302)
class TestSiteViews(UsersMixin, TestProjectPermissionBase):
def test_list(self):
url = reverse("beaconsite:site-list")
self.assert_response(url, self.good_users, 200)
self.assert_response(url, self.bad_users, 302)
def test_detail(self):
url = reverse("beaconsite:site-detail", kwargs={"site": str(self.site.sodar_uuid)})
self.assert_response(url, self.good_users, 200)
self.assert_response(url, self.bad_users, 302)
def test_update(self):
url = reverse("beaconsite:site-update", kwargs={"site": str(self.site.sodar_uuid)})
self.assert_response(url, self.good_users, 200)
self.assert_response(url, self.bad_users, 302)
def test_delete(self):
url = reverse("beaconsite:site-delete", kwargs={"site": str(self.site.sodar_uuid)})
self.assert_response(url, self.good_users, 200)
self.assert_response(url, self.bad_users, 302)
| 35.864198 | 98 | 0.664028 | 2,716 | 0.93494 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.097762 |
e98f3c0cbfe695e09cf6acaf634dcaef0d39ab20
| 965 |
py
|
Python
|
backend/forms.py
|
adarshrao1/Flood_detection
|
4a2a7ecef178366700d5c29a13d45143eaa7cc54
|
[
"CC0-1.0"
] | null | null | null |
backend/forms.py
|
adarshrao1/Flood_detection
|
4a2a7ecef178366700d5c29a13d45143eaa7cc54
|
[
"CC0-1.0"
] | null | null | null |
backend/forms.py
|
adarshrao1/Flood_detection
|
4a2a7ecef178366700d5c29a13d45143eaa7cc54
|
[
"CC0-1.0"
] | 5 |
2021-06-05T14:11:04.000Z
|
2021-06-19T05:51:56.000Z
|
from django.forms import ModelForm
from backend.models import Image, Image2
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
class CreateUserForm(UserCreationForm):
email = forms.EmailField(
widget=forms.TextInput(attrs={'class': 'form-control', }),
)
username = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', }),
)
password1 = forms.CharField(
widget=forms.PasswordInput(attrs={'class': 'form-control', }),
)
password2 = forms.CharField(
widget=forms.PasswordInput(attrs={'class': 'form-control', }),
)
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
class ImageForm(ModelForm):
class Meta:
model = Image
fields = "__all__"
class ImageForm2(ModelForm):
class Meta:
model = Image2
fields = "__all__"
| 26.081081 | 70 | 0.654922 | 756 | 0.78342 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.146114 |
e98f933a4b8c3a1b81125f679e51f0db2f252a76
| 22,851 |
py
|
Python
|
uldaq-1.2.1/uldaq/ul_c_interface.py
|
Novellogiampiero/RapLib
|
614d25abf402052dcaf81aa72044e3a03cb014fa
|
[
"Apache-2.0"
] | null | null | null |
uldaq-1.2.1/uldaq/ul_c_interface.py
|
Novellogiampiero/RapLib
|
614d25abf402052dcaf81aa72044e3a03cb014fa
|
[
"Apache-2.0"
] | null | null | null |
uldaq-1.2.1/uldaq/ul_c_interface.py
|
Novellogiampiero/RapLib
|
614d25abf402052dcaf81aa72044e3a03cb014fa
|
[
"Apache-2.0"
] | null | null | null |
"""
Created on Mar 7 2018
@author: MCC
"""
from ctypes import (CDLL, CFUNCTYPE, Structure, c_uint, c_int, c_longlong,
POINTER, c_double, c_char, py_object, c_ulonglong, cast,
c_char_p, c_byte)
from enum import IntEnum
from .ul_structs import DaqDeviceDescriptor, AiQueueElement, TransferStatus
from .ul_structs import DaqInChanDescriptor, MemDescriptor, DaqOutChanDescriptor, EventCallbackArgs
from .ul_enums import DaqEventType
from sys import platform
if platform.startswith('darwin'):
lib = CDLL('libuldaq.dylib')
else:
lib = CDLL('libuldaq.so')
#
# Structures
#
class EventParams(Structure):
_fields_ = [("user_data", py_object), # the user data
("user_callback", py_object), ]
#
# Enums
#
class UlInfoItem (IntEnum):
"""UL version information."""
VER_STR = 2000, #: UL version number
IP_ADDR_STR = 2001, #: Returns the IP address of the Ethernet DAQ device
NET_IFC_STR = 2002, #: Returns the name of the network interface which is used to connect to the Ethernet DAQ device
class DevItemInfo (IntEnum):
"""Device information types"""
HAS_AI_DEV = 1, #: The DAQ device has an analog input subsystem.
HAS_AO_DEV = 2, #: The DAQ device has an analog output subsystem.
HAS_DIO_DEV = 3, #: The DAQ device has a Digital I/O subsystem.
HAS_CTR_DEV = 4, #: The DAQ device has a counter input subsystem.
HAS_TMR_DEV = 5, #: The DAQ device has a timer output subsystem.
HAS_DAQI_DEV = 6, #: The DAQ device has a DAQ input subsystem.
HAS_DAQO_DEV = 7, #: The DAQ device has an DAQ output subsystem.
DAQ_EVENT_TYPES = 8, #: Event types supported by the DAQ device
MEM_REGIONS = 9, #: Memory regions supported by the DAQ device
class DevConfigItem (IntEnum):
"""Device Configuration Items"""
HAS_EXP = 1, #: The DAQ device has an expansion board attached.
CONNECTION_CODE = 2, #: Connection code of the Ethernet DAQ device.
MEM_UNLOCK_CODE = 3, #: Memory unlock code.
RESET = 4, #: Resets the DAQ device.
class AiInfoItem (IntEnum):
"""Use with ulAIGetInfo() to obtain AI subsystem information."""
RESOLUTION = 1, #: The A/D resolution in number of bits.
NUM_CHANS = 2, #: The number of A/D channels on the specified device.
NUM_CHANS_BY_MODE = 3, #: The number of A/D channels for the specified channel mode.
NUM_CHANS_BY_TYPE = 4, #: The number of A/D channels for the specified channel type.
CHAN_TYPES = 5, #: A bitmask of supported :func:'~ul_daq.AiChanType' values.
SCAN_OPTIONS = 6, #: A bitmask of supported :func:'~ul_daq.ScanOption' values.
HAS_PACER = 7, #: Paced operations are supported.
NUM_DIFF_RANGES = 8, #: A number of supported :func:'~ul_daq.Range' values for differential mode operations.
NUM_SE_RANGES = 9, #: A number of supported :func:'~ul_daq.Range' values for single-ended mode operations.
DIFF_RANGE = 10, #: The :func:'~ul_daq.Range' for the specified differential range index.
SE_RANGE = 11, #: The :func:'~ul_daq.Range' for the specified single-ended range index.
TRIG_TYPES = 12, #: A bitmask of supported :func:'~ul_daq.TriggerType' values.
MAX_QUEUE_LENGTH_BY_MODE = 13, #: The maximum length of the queue for the specified channel mode.
QUEUE_TYPES = 14, #: A bitmask of supported :func:'~ul_daq.AiQueueType' values supported for the specified device.
QUEUE_LIMITS = 15, #: A bitmask of supported :func:'~ul_daq.AiChanQueueLimitation' values.
FIFO_SIZE = 16, #: FIFO size in bytes.
IEPE_SUPPORTED = 17, #: Returns a zero or non-zero value to the infoValue argument. If non-zero, IEPE mode is supported.
class AiInfoItemDbl (IntEnum):
"""Use with ulAIGetInfoDbl() to obtain AI subsystem information."""
MIN_SCAN_RATE = 1000, #: The minimum scan rate of the specified device.
MAX_SCAN_RATE = 1001, #: The maximum scan rate of the specified device.
MAX_THROUGHPUT = 1002, #: The maximum throughput in samples per second of the specified device.
MAX_BURST_RATE = 1003, #: The maximum scan rate in samples per second when using :func:'~ul_daq.ScanOption.SO_BURSTIO' mode.
MAX_BURST_THROUGHPUT = 1004, #: The maximum throughput in samples per second when using :func:'~ul_daq.ScanOption.SO_BURSTIO' mode.
class AiConfigItem (IntEnum):
"""Use with ulSetConfig() and ulGetConfig() to perform configuration operations on the AI subsystem."""
CHAN_TYPE = 1, #: The channel type of the specified channel. Set with :func:'~ul_daq.AiChanType'.
CHAN_TC_TYPE = 2, #: The thermocouple type of the specified channel. Set with :func:'~ul_daq.TcType'.
CHAN_TEMP_UNIT = 3, #: The temperature unit of the specified channel. Set with :func:'~ul_daq.TempUnit'.
    TEMP_UNIT = 4, #: The temperature unit for the specified device. Set with :func:'~ul_daq.TempUnit'.
ADC_TIMING_MODE = 5, #: The timing mode. Set with :func:'~ul_daq.AdcTimingMode'.
AUTO_ZERO_MODE = 6, #: The auto zero mode. Set with :func:'~ul_daq.AutoZeroMode'.
CAL_DATE = 7, #: The date when the device was calibrated last.
#: The IEPE current excitation mode for the specified channel. Set with :func:'~ul_daq.IepeMode'.
CHAN_IEPE_MODE = 8,
    CHAN_COUPLING_MODE = 9, #: The coupling mode for the specified channel. Set with :func:'~ul_daq.CouplingMode'.
CHAN_SENSOR_CONNECTION_TYPE = 10, #: The connection type of the sensor connected to the specified channel.
CHAN_OTD_MODE = 11, #: The open thermocouple detection mode for the specified channel. Set with :func:'~ul_daq.OtdMode'.
OTD_MODE = 12, #: The open thermocouple detection mode.
CAL_TABLE_TYPE = 13, #: The calibration table type.
REJECT_FREQ_TYPE = 14, #: The rejection frequency type.
#: The date when the expansion board was calibrated last in UNIX Epoch time.
#: Set index to 0 for the factory calibration date, or 1 for the field
#: calibration date. If the value read is not a valid date or the index is
#: invalid, 0 (Unix Epoch) is returned.
EXP_CAL_DATE = 15,
class AiConfigItemDbl (IntEnum):
"""Use with ulSetConfigDbl() and ulGetConfigDbl() to perform configuration operations on the AI subsystem. """
CHAN_SLOPE = 1000, #: The custom slope of the specified channel.
CHAN_OFFSET = 1001, #: The custom offset of the specified channel.
CHAN_SENSOR_SENSIVITY = 1002, #: The sensitivity of the sensor connected to the specified channel.
CHAN_DATA_RATE = 1003, #: The data rate of the specified channel.
class AiConfigItemStr(IntEnum):
#: Calibration date
CAL_DATE = 2000,
#: The channel coefficients used for the configured sensor.
CHAN_COEFS = 2001,
#: Returns the calibration date of expansion board. Set index to 0 for the
#: factory calibration date, or 1 for the field calibration date.
#: If the value read is not a valid date or the index is invalid,
#: Unix Epoch is returned.
EXP_CAL_DATE_STR = 2002,
class DioInfoItem (IntEnum):
"""Use with ulDIOGetInfo() to obtain information about the DIO subsystem."""
NUM_PORTS = 1, #: The number of ports on the specified device.
PORT_TYPE = 2, #: The port type for the specified port index.
PORT_IO_TYPE = 3, #: The #DigitalPortIoType for the specified port index.
NUM_BITS = 4, #: The number of bits on the port specified by the port index.
HAS_PACER = 5, #: Paced operations are supported for the specified digital direction.
SCAN_OPTIONS = 6, #: A bit mask of supported :func:'~ul_daq.ScanOption' values for the specified digital direction.
TRIG_TYPES = 7, #: A bitmask of supported :func:'~ul_daq.TriggerType' values for the specified digital direction.
FIFO_SIZE = 8, #: FIFO size in bytes for the specified digital direction.
class DioInfoItemDbl (IntEnum):
"""Use with ulDIOGetInfoDbl() to obtain information about the DIO subsystem."""
MIN_SCAN_RATE = 1000, #: The minimum scan rate of the specified device.
MAX_SCAN_RATE = 1001, #: The maximum scan rate of the specified device.
MAX_THROUGHPUT = 1002, #: The maximum scanning throughput of the specified device.
class DioConfigItem (IntEnum):
""" Use with ulDIOGetConfig() to obtain information about the DIO subsystem configuration. """
#: The port direction. Set with :func:'~ul_daq.DigitalDirection'.
PORT_DIRECTION_MASK = 1,
#: Writes a value to the specified port number. This allows writing a value when the port is in
#: input mode so that when the port is switched to output mode, the state of the bits is known.
PORT_INITIAL_OUTPUT_VAL = 2,
#: Returns or writes the low-pass filter setting. A 0 indicates that the filter is disabled for the
#: corresponding bit.
PORT_ISO_FILTER_MASK = 3,
#: Returns the port logic. A 0 indicates non-invert mode, and a non-zero value indicates output inverted.
PORT_LOGIC = 4,
class DaqIInfoItem (IntEnum):
"""Use with ulDaqIGetInfo() to obtain DAQ input subsystem information."""
CHAN_TYPES = 1, #: A bitmask of supported :func:'~ul_daq.DaqInChanType' values.
SCAN_OPTIONS = 2, #: A bit mask of supported :func:'~ul_daq.ScanOption' values.
TRIG_TYPES = 3, #: A bitmask of supported :func:'~ul_daq.TriggerType' values.
FIFO_SIZE = 4, #: FIFO size in bytes.
class DaqIInfoItemDbl (IntEnum):
"""Use with ulDaqIGetInfoDbl() to obtain information about the counter subsystem."""
MIN_SCAN_RATE = 1000, #: The minimum scan rate in samples per second.
MAX_SCAN_RATE = 1001, #: The maximum scan rate of the specified device.
MAX_THROUGHPUT = 1002, #: The maximum throughput of the specified device.
class AoConfigItem(IntEnum):
SYNC_MODE = 1, #: The sync mode. Set with AOutSyncMode.
CHAN_SENSE_MODE = 2, #: The channel sense mode. Set with AOutSenseMode.
class AoInfoItem (IntEnum):
"""Use with ulAOGetInfo() to obtain information about the analog output subsystem."""
RESOLUTION = 1, #: The D/A resolution.
NUM_CHANS = 2, #: The number of D/A channels on the specified device.
    SCAN_OPTIONS = 3, #: A bit mask of supported :func:'~ul_daq.ScanOption' values.
HAS_PACER = 4, #: Paced operations are supported.
NUM_RANGES = 5, #: The number of supported :func:'~ul_daq.Range' values for D/A operations.
RANGE = 6, #: The :func:'~ul_daq.Range' for the specified range index.
TRIG_TYPES = 7, #: A bitmask of supported :func:'~ul_daq.TriggerType' values.
FIFO_SIZE = 8, #: FIFO size in bytes.
class AoInfoItemDbl (IntEnum):
"""Use with ulAOGetInfoDbl() to obtain information about the Analog output subsystem."""
MIN_SCAN_RATE = 1000, #: The minimum scan rate of the specified device.
MAX_SCAN_RATE = 1001, #: The maximum scan rate of the specified device.
MAX_THROUGHPUT = 1002, #: The maximum scanning throughput of the specified device.
class DaqoInfoItem (IntEnum):
"""Use with ulDaqOGetInfo() to obtain information about the DAQ output subsystem."""
CHAN_TYPES = 1, #: A bit mask of supported :class:`DaqOutChanType` values.
SCAN_OPTIONS = 2, #: A bit mask of supported :class:`ScanOption` values.
TRIG_TYPES = 3, #: A bit mask of supported :class:`TriggerType` values.
FIFO_SIZE = 4, #: FIFO size in bytes.
class DaqoInfoItemDbl (IntEnum):
"""Use with ulDaqOGetInfoDbl() to obtain information about the DAQ output subsystem."""
MIN_SCAN_RATE = 1000, #: The minimum scan rate in samples per second.
MAX_SCAN_RATE = 1001, #: The maximum scan rate of the specified device.
MAX_THROUGHPUT = 1002, #: The maximum throughput of the specified device.
class CtrInfoItem (IntEnum):
"""Use with ulCtrGetInfo() to obtain information about the counter subsystem."""
NUM_CTRS = 1, #: The number of counter channels on the specified device.
MEASUREMENT_TYPES = 2, #: A bit mask of supported :class:`CounterMeasurementType` values.
MEASUREMENT_MODES = 3, #: A bit mask of supported :class:`CounterMeasurementType` values.
REGISTER_TYPES = 4, #: A bit mask of supported :class:`CounterRegisterType` values.
RESOLUTION = 5, #: The resolution of the specified counter channel.
HAS_PACER = 6, #: Paced operations are supported.
SCAN_OPTIONS = 7, #: A bit mask of supported :class:`ScanOption` values.
TRIG_TYPES = 8, #: A bit mask of supported :class:`TriggerType` values.
FIFO_SIZE = 9, #: FIFO size in bytes.
class CtrInfoItemDbl (IntEnum):
"""Use with ulCtrGetInfoDbl() to obtain information about the counter subsystem."""
MIN_SCAN_RATE = 1000, #: The minimum scan rate in samples per second.
MAX_SCAN_RATE = 1001, #: The maximum scan rate of the specified device.
MAX_THROUGHPUT = 1002, #: The maximum throughput of the specified device.
class CtrConfigItem (IntEnum):
"""Use with ulCtrSetConfig() and ulCtrGetConfig() to configure the Ctr subsystem."""
REG = 1, #: The counter(s) configuration register.
class TmrInfoItem (IntEnum):
"""Use with ulTmrGetInfo() to obtain information about the timer subsystem."""
NUM_TMRS = 1, #: The :class:`TimerType` of the specified timer index.
TYPE = 2, #: The number of bits on the port specified by the port index.
class TmrInfoItemDbl (IntEnum):
"""Use with ulTmrGetInfoDbl() to obtain information about the timer subsystem."""
MIN_FREQ = 1000, #: The minimum frequency of the specified device.
MAX_FREQ = 1001, #: The maximum frequency of the specified device.
# Prototypes for callbacks
InterfaceCallbackProcType = CFUNCTYPE(None, c_longlong, c_uint, c_ulonglong, POINTER(EventParams))
def interface_event_callback_function(handle, event_type, event_data, event_params):
# type: (int, DaqEventType, py_object, py_object) -> None
"""Internal function used for handling event callbacks."""
event_parameters = cast(event_params, POINTER(EventParams)).contents
user_data = event_parameters.user_data
cb = event_parameters.user_callback
cb(EventCallbackArgs(event_type, event_data, user_data))
return
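# Usage sketch (illustrative): how the callback plumbing above is typically
# wired to ulEnableEvent. `daq_handle`, `event_types` and `user_callback` are
# assumed inputs, and error handling is omitted; the EventParams instance and
# the C callback must stay referenced for as long as the event is enabled,
# so both are returned to the caller.
def _enable_event_sketch(daq_handle, event_types, event_parameter,
                         user_callback, user_data=None):
    from ctypes import byref
    params = EventParams()
    params.user_data = user_data
    params.user_callback = user_callback
    c_callback = InterfaceCallbackProcType(interface_event_callback_function)
    err = lib.ulEnableEvent(daq_handle, event_types, event_parameter,
                            c_callback, byref(params))
    return err, params, c_callback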
# Prototypes for DAQ Device
lib.ulDevGetConfigStr.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_char), POINTER(c_uint))
lib.ulDevGetConfig.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulGetDaqDeviceDescriptor.argtypes = (c_longlong, POINTER(DaqDeviceDescriptor))
lib.ulDevGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulGetDaqDeviceInventory.argtypes = (c_uint, POINTER(DaqDeviceDescriptor), POINTER(c_uint))
lib.ulConnectDaqDevice.argtypes = (c_longlong,)
lib.ulEnableEvent.argtypes = (c_longlong, c_uint, c_ulonglong, InterfaceCallbackProcType, POINTER(EventParams))
lib.ulDisableEvent.argtypes = (c_longlong, c_uint)
lib.ulMemRead.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_byte), c_uint)
lib.ulMemWrite.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_byte), c_uint)
lib.ulCreateDaqDevice.argtypes = (DaqDeviceDescriptor,)
lib.ulReleaseDaqDevice.argtypes = (c_longlong,)
lib.ulIsDaqDeviceConnected.argtypes = (c_longlong, POINTER(c_int))
lib.ulDisconnectDaqDevice.argtypes = (c_longlong,)
lib.ulFlashLed.argtypes = (c_longlong, c_int)
lib.ulGetInfoStr.argtypes = (c_uint, c_uint, POINTER(c_char), POINTER(c_uint))
lib.ulSetConfig.argtypes = (c_uint, c_uint, c_longlong)
lib.ulGetConfig.argtypes = (c_uint, c_uint, POINTER(c_longlong))
lib.ulGetNetDaqDeviceDescriptor.argtypes = (c_char_p, c_uint, c_char_p,
POINTER(DaqDeviceDescriptor),
c_double)
lib.ulDaqDeviceConnectionCode.argtypes = (c_uint, c_longlong)
# Prototypes for the analog input subsystem
lib.ulAIn.argtypes = (c_longlong, c_int, c_uint, c_uint, c_uint, POINTER(c_double))
lib.ulAInScan.argtypes = (c_longlong, c_int, c_int, c_uint, c_uint, c_int, POINTER(c_double), c_uint, c_uint,
POINTER(c_double))
lib.ulAInScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulAInLoadQueue.argtypes = (c_longlong, POINTER(AiQueueElement), c_uint)
lib.ulAInSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint)
lib.ulAInScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulAISetConfig.argtypes = (c_longlong, c_uint, c_uint, c_longlong)
lib.ulAIGetConfig.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulAISetConfigDbl.argtypes = (c_longlong, c_uint, c_uint, c_double)
lib.ulAIGetConfigDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
lib.ulAIGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulAIGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
lib.ulAInScanStop.argtypes = (c_longlong,)
lib.ulAIGetConfigStr.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_char), POINTER(c_uint))
lib.ulTIn.argtypes = (c_longlong, c_int, c_uint, c_uint, POINTER(c_double))
lib.ulTInArray.argtypes = (c_longlong, c_int, c_int, c_uint, c_uint,
POINTER(c_double))
# Prototypes for the analog output subsystem
lib.ulAOut.argtypes = (c_longlong, c_int, c_uint, c_uint, c_double)
lib.ulAOutScan.argtypes = (c_longlong, c_int, c_int, c_uint, c_int, POINTER(c_double), c_uint, c_uint,
POINTER(c_double))
lib.ulAOutScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulAOutScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulAOutScanStop.argtypes = (c_longlong,)
lib.ulAOutSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint)
lib.ulAOGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulAOGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
lib.ulAOutArray.argtypes = (c_longlong, c_int, c_int, POINTER(c_uint), c_uint,
POINTER(c_double))
# Prototypes for the DAQ input subsystem
lib.ulDaqInSetTrigger.argtypes = (c_longlong, c_uint, DaqInChanDescriptor, c_double, c_double, c_uint)
lib.ulDaqInScan.argtypes = (c_longlong, POINTER(DaqInChanDescriptor), c_int, c_int, POINTER(c_double), c_uint, c_uint,
POINTER(c_double))
lib.ulDaqInScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulDaqInScanStop.argtypes = (c_longlong,)
lib.ulDaqInScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulDaqIGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulDaqIGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
# Prototypes for DIO subsystem
lib.ulDIn.argtypes = (c_longlong, c_uint, POINTER(c_ulonglong))
lib.ulDOut.argtypes = (c_longlong, c_uint, c_ulonglong)
lib.ulDBitIn.argtypes = (c_longlong, c_uint, c_int, POINTER(c_uint))
lib.ulDBitOut.argtypes = (c_longlong, c_uint, c_int, c_uint)
lib.ulDInScan.argtypes = (c_longlong, c_uint, c_uint, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_ulonglong))
lib.ulDOutScan.argtypes = (c_longlong, c_uint, c_uint, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_ulonglong))
lib.ulDInScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulDOutScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulDOutScanStop.argtypes = (c_longlong,)
lib.ulDInScanStop.argtypes = (c_longlong,)
lib.ulDInScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulDOutScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulDInSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint)
lib.ulDOutSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint)
lib.ulDConfigPort.argtypes = (c_longlong, c_uint, c_uint)
lib.ulDConfigBit.argtypes = (c_longlong, c_uint, c_int, c_uint)
lib.ulDIOGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulDIOGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
lib.ulDIOGetConfig.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulDIOSetConfig.argtypes = (c_longlong, c_uint, c_uint, c_longlong)
lib.ulDInArray.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_ulonglong))
lib.ulDOutArray.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_ulonglong))
# prototypes for DAQ output subsystem
lib.ulDaqOutScan.argtypes = (c_longlong, POINTER(DaqOutChanDescriptor), c_int, c_int, POINTER(c_double), c_uint,
c_uint, POINTER(c_double))
lib.ulDaqOutScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulDaqOutScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulDaqOutScanStop.argtypes = (c_longlong,)
lib.ulDaqOutSetTrigger.argtypes = (c_longlong, c_uint, DaqInChanDescriptor, c_double, c_double, c_uint)
lib.ulDaqOGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulDaqOGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
# prototypes for counter subsystem
lib.ulCIn.argtypes = (c_longlong, c_int, POINTER(c_ulonglong))
lib.ulCRead.argtypes = (c_longlong, c_int, c_uint, POINTER(c_ulonglong))
lib.ulCLoad.argtypes = (c_longlong, c_int, c_uint, c_ulonglong)
lib.ulCClear.argtypes = (c_longlong, c_int)
lib.ulCConfigScan.argtypes = (c_longlong, c_int, c_uint, c_uint, c_uint, c_uint, c_uint, c_uint, c_uint)
lib.ulCInScan.argtypes = (c_longlong, c_int, c_int, c_int, POINTER(c_double), c_uint, c_uint, POINTER(c_ulonglong))
lib.ulCInSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint)
lib.ulCInScanStatus.argtypes = (c_longlong, POINTER(c_uint), POINTER(TransferStatus))
lib.ulCInScanStop.argtypes = (c_longlong,)
lib.ulCInScanWait.argtypes = (c_longlong, c_uint, c_longlong, c_double)
lib.ulCtrGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulCtrGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
lib.ulCtrSetConfig.argtypes = (c_longlong, c_uint, c_uint, c_longlong)
lib.ulCtrGetConfig.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
# Prototypes for the timer subsystem
lib.ulTmrPulseOutStart.argtypes = (c_longlong, c_int, POINTER(c_double), POINTER(c_double), c_ulonglong,
POINTER(c_double), c_uint, c_uint)
lib.ulTmrPulseOutStop.argtypes = (c_longlong, c_int)
lib.ulTmrPulseOutStatus.argtypes = (c_longlong, c_int, POINTER(c_uint))
lib.ulTmrSetTrigger.argtypes = (c_longlong, c_uint, c_int, c_double, c_double, c_uint)
lib.ulTmrGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulTmrGetInfoDbl.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_double))
# Other Prototypes
lib.ulGetErrMsg.argtypes = (c_uint, POINTER(c_char))
lib.ulDevGetInfo.argtypes = (c_longlong, c_uint, c_uint, POINTER(c_longlong))
lib.ulMemGetInfo.argtypes = (c_longlong, c_uint, POINTER(MemDescriptor))
| 56.843284 | 136 | 0.733928 | 12,835 | 0.561682 | 0 | 0 | 0 | 0 | 0 | 0 | 9,479 | 0.414818 |
e991e9f5f0c1bdfb1e7229e0942eed1c870966c6
| 1,478 |
py
|
Python
|
gfg/trees/sorted_ll_to_bst.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | 1 |
2019-04-18T03:29:02.000Z
|
2019-04-18T03:29:02.000Z
|
gfg/trees/sorted_ll_to_bst.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | null | null | null |
gfg/trees/sorted_ll_to_bst.py
|
rrwt/daily-coding-challenge
|
b16fc365fd142ebab429e605cb146c8bb0bc97a2
|
[
"MIT"
] | null | null | null |
"""
Given a Singly Linked List which has data members sorted in ascending order.
Construct a Balanced Binary Search Tree which has same data members as the given Linked List.
"""
from typing import Optional
from binary_tree_node import Node # type: ignore
from tree_traversal import inorder # type: ignore
class LLNode:
def __init__(self, data: int):
self.data = data
self.next: Optional[LLNode] = None
def ll_size(head: Optional[LLNode]) -> int:
temp = head
count = 0
while temp:
temp = temp.next
count += 1
return count
def sorted_ll_to_bst(head: Optional[LLNode]) -> Optional[Node]:
def construct(length: int) -> Optional[Node]:
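        # build the left subtree from the first length // 2 nodes, consume
        # the next list node as the root, then build the right subtree from
        # the remaining nodes; `head` advances through the list in order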
nonlocal head
if head is None or length == 0:
return None
left = construct(length // 2)
root = Node(head.data)
head = head.next
root.left = left
root.right = construct(length - length // 2 - 1)
return root
return construct(ll_size(head))
if __name__ == "__main__":
head = LLNode(1)
head.next = LLNode(2)
head.next.next = LLNode(3)
inorder(sorted_ll_to_bst(head))
print()
head = LLNode(1)
head.next = LLNode(2)
head.next.next = LLNode(3)
head.next.next.next = LLNode(4)
head.next.next.next.next = LLNode(5)
head.next.next.next.next.next = LLNode(6)
head.next.next.next.next.next.next = LLNode(7)
inorder(sorted_ll_to_bst(head))
print()
| 23.460317 | 93 | 0.635995 | 116 | 0.078484 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.146143 |
e9920d3efc1f0f760192d2dad03a56edd3268c51
| 556 |
py
|
Python
|
uvcoverage.py
|
haricash/bayesian-ionized-bubbles
|
c0de5d8ff66f797c72f119b1bc9b11ff8cc63ee6
|
[
"MIT"
] | null | null | null |
uvcoverage.py
|
haricash/bayesian-ionized-bubbles
|
c0de5d8ff66f797c72f119b1bc9b11ff8cc63ee6
|
[
"MIT"
] | null | null | null |
uvcoverage.py
|
haricash/bayesian-ionized-bubbles
|
c0de5d8ff66f797c72f119b1bc9b11ff8cc63ee6
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from modules.conversions import enu2uvw
data = np.load("uv-array.npy")
e = data[0,:].transpose()
n = data[1,:].transpose()
uvarray = []
for i in range(120):
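    # sweep the hour angle (in steps of 1/30, in whatever units enu2uvw
    # expects) so that each baseline traces out its uv track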
u,v = enu2uvw( wavelength=1.690,
hour_angle=i/30,
declination=0,
ref_declination=-30,
ref_hour_angle=0,
e=e,
n=n)
# np.save("uv-coverage.npy",u)
uvarray.append((u,v))
np.save("uv-coverage.npy",uvarray)
| 23.166667 | 41 | 0.526978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.109712 |
e99213e148fd6d67da5c28d0d36014f1bdd56a29
| 6,540 |
py
|
Python
|
main.py
|
Bishalsarang/Leetcode-Questions
|
9d0c938778343c073b631884cc38411ea0ac7cd3
|
[
"MIT"
] | 6 |
2021-09-17T12:26:59.000Z
|
2022-03-11T00:37:35.000Z
|
main.py
|
Bishalsarang/Leetcode-Questions
|
9d0c938778343c073b631884cc38411ea0ac7cd3
|
[
"MIT"
] | null | null | null |
main.py
|
Bishalsarang/Leetcode-Questions
|
9d0c938778343c073b631884cc38411ea0ac7cd3
|
[
"MIT"
] | null | null | null |
# Author: Bishal Sarang
import json
import os
import pickle
import time
import bs4
import colorama
import requests
from colorama import Back, Fore
from ebooklib import epub
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from utils import *
import epub_writer
# Initialize Colorama
colorama.init(autoreset=True)
options = Options()
options.headless = True
# Disable Warning, Error and Info logs
# Show only fatal errors
options.add_argument("--log-level=3")
driver = webdriver.Chrome(options=options)
# Get upto which problem it is already scraped from track.conf file
completed_upto = read_tracker("track.conf")
# Load chapters list that stores chapter info
# Store chapter info
with open('chapters.pickle', 'rb') as f:
chapters = pickle.load(f)
def download(problem_num, url, title, solution_slug):
print(
Fore.BLACK + Back.CYAN + f"Fetching problem num " + Back.YELLOW + f" {problem_num} " + Back.CYAN + " with url " + Back.YELLOW + f" {url} ")
n = len(title)
try:
driver.get(url)
# Wait 20 secs or until div with id initial-loading disappears
element = WebDriverWait(driver, 20).until(
EC.invisibility_of_element_located((By.ID, "initial-loading"))
)
# Get current tab page source
html = driver.page_source
soup = bs4.BeautifulSoup(html, "html.parser")
# Construct HTML
title_decorator = '*' * n
problem_title_html = title_decorator + f'<div id="title">{title}</div>' + '\n' + title_decorator
problem_html = problem_title_html + str(
soup.find("div", {"class": "content__u3I1 question-content__JfgR"})) + '<br><br><hr><br>'
# Append Contents to a HTML file
with open("out.html", "ab") as f:
f.write(problem_html.encode(encoding="utf-8"))
# create and append chapters to construct an epub
c = epub.EpubHtml(title=title, file_name=f'chap_{problem_num}.xhtml', lang='hr')
c.content = problem_html
chapters.append(c)
# Write List of chapters to pickle file
dump_chapters_to_file(chapters)
# Update upto which the problem is downloaded
update_tracker('track.conf', problem_num)
        print(
            Fore.BLACK + Back.GREEN + f"Writing problem num " + Back.YELLOW + f" {problem_num} " + Back.GREEN + " with url " + Back.YELLOW + f" {url} ")
        print(Fore.BLACK + Back.GREEN + " successful ")
except Exception as e:
print(Back.RED + f" Failed Writing!! {e} ")
driver.quit()
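# Note (not in the original): quitting the driver inside download()'s except
# block tears down the shared browser, so any later download() call in the
# same run would also fail. A hypothetical alternative is to log and re-raise
# here and rely solely on the `finally: driver.quit()` in main() for cleanup.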
def main():
MAXIMUM_NUMBER_OF_PROBLEMS_PER_INSTANCE = int(os.environ.get("MAXIMUM_NUMBER_OF_PROBLEMS", 400))
SLEEP_TIME_PER_PROBLEM_IN_SECOND = int(os.environ.get("SLEEP_TIME_PER_PROBLEM_IN_SECOND", 5))
    # Leetcode API URL returning the JSON list of problems in the algorithms category
ALGORITHMS_ENDPOINT_URL = "https://leetcode.com/api/problems/algorithms/"
# Problem URL is of format ALGORITHMS_BASE_URL + question__title_slug
# If question__title_slug = "two-sum" then URL is https://leetcode.com/problems/two-sum
ALGORITHMS_BASE_URL = "https://leetcode.com/problems/"
# Load JSON from API
algorithms_problems_json = requests.get(ALGORITHMS_ENDPOINT_URL).content
algorithms_problems_json = json.loads(algorithms_problems_json)
# List to store question_title_slug
links = []
for child in algorithms_problems_json["stat_status_pairs"]:
# Only process free problems
if not child["paid_only"]:
question__title_slug = child["stat"]["question__title_slug"]
question__article__slug = child["stat"]["question__article__slug"]
question__title = child["stat"]["question__title"]
frontend_question_id = child["stat"]["frontend_question_id"]
difficulty = child["difficulty"]["level"]
links.append(
(question__title_slug, difficulty, frontend_question_id, question__title, question__article__slug))
has_new_problems = (completed_upto != len(links) - 1)
if has_new_problems:
        styles_str = "<style>pre{white-space:pre-wrap;background:#f7f9fa;padding:10px 15px;color:#263238;line-height:1.6;font-size:13px;border-radius:3px;margin-top:0;margin-bottom:1em;overflow:auto}b,strong{font-weight:bolder}#title{font-size:16px;color:#212121;font-weight:600;margin-bottom:10px}hr{height:10px;border:0;box-shadow:0 10px 10px -10px #8c8b8b inset}</style>"
with open("out.html", "ab") as f:
f.write(styles_str.encode(encoding="utf-8"))
    # Sort by difficulty followed by problem id, in ascending order
links = sorted(links, key=lambda x: (x[1], x[2]))
downloaded_now = 0
try:
for i in range(completed_upto + 1, len(links)):
question__title_slug, _, frontend_question_id, question__title, question__article__slug = links[i]
url = ALGORITHMS_BASE_URL + question__title_slug
title = f"{frontend_question_id}. {question__title}"
# Download each file as html and write chapter to chapters.pickle
download(i, url, title, question__article__slug)
downloaded_now += 1
if downloaded_now == MAXIMUM_NUMBER_OF_PROBLEMS_PER_INSTANCE:
break
            # Sleep between problems, and for 2 minutes after every 30 problems
if i % 30 == 0:
print(f"Sleeping 120 secs\n")
time.sleep(120)
else:
print(f"Sleeping {SLEEP_TIME_PER_PROBLEM_IN_SECOND} secs\n")
time.sleep(SLEEP_TIME_PER_PROBLEM_IN_SECOND)
finally:
# Close the browser after download
driver.quit()
try:
if has_new_problems:
epub_writer.write("Leetcode Questions.epub", "Leetcode Questions", "Anonymous", chapters)
print(Back.GREEN + "All operations successful")
else:
print(Back.GREEN + "No new problems found. Exiting")
except Exception as e:
print(Back.RED + f"Error making epub {e}")
if __name__ == "__main__":
main()
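# The helpers read_tracker / update_tracker / dump_chapters_to_file come from
# the wildcard `from utils import *` above. Plausible minimal versions, shown
# only as a hypothetical sketch (the project's real utils module may differ):
#
#   def read_tracker(path):                      # last scraped problem index
#       with open(path) as f:
#           return int(f.read().strip())
#
#   def update_tracker(path, problem_num):       # persist progress
#       with open(path, "w") as f:
#           f.write(str(problem_num))
#
#   def dump_chapters_to_file(chapters):         # checkpoint epub chapters
#       with open("chapters.pickle", "wb") as f:
#           pickle.dump(chapters, f)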
avg_line_length: 40.875 | max_line_length: 376 | alphanum_fraction: 0.657034 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2503 | score_documentation: 0.382722
hexsha: e992f77a4ff4f3363d1bcb7a821282c7065578b8 | size: 4985 | ext: py | lang: Python
max_stars_repo_path: model/magenta_app.py | max_stars_repo_name: DesmondYuan/DeepMovement | max_stars_repo_head_hexsha: b4f347f139d52c345b592bc712260fa579b6c9a8 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars_event_min/max_datetime: null / null
max_issues_repo_path: model/magenta_app.py | max_issues_repo_name: DesmondYuan/DeepMovement | max_issues_repo_head_hexsha: b4f347f139d52c345b592bc712260fa579b6c9a8 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_event_min/max_datetime: null / null
max_forks_repo_path: model/magenta_app.py | max_forks_repo_name: DesmondYuan/DeepMovement | max_forks_repo_head_hexsha: b4f347f139d52c345b592bc712260fa579b6c9a8 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | forks_event_min/max_datetime: 2020-12-31T14:44:38.000Z / 2020-12-31T14:44:38.000Z
# Adapted from Magenta console commands
import os
from magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_model as build_model
from magenta.models.image_stylization import image_utils
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
class Magenta_Model():
    """Wraps Magenta's arbitrary image stylization network for inference."""
def __init__(self, checkpoint,
content_square_crop=False, style_square_crop=False,
style_image_size=256, content_image_size=256):
tf.disable_v2_behavior()
tf.Graph().as_default()
sess = tf.Session()
        # Defines the placeholder for the style image.
self.style_img_ph = tf.placeholder(tf.float32, shape=[None, None, 3])
        if style_square_crop:
            style_img_preprocessed = image_utils.center_crop_resize_image(
                self.style_img_ph, style_image_size)
else:
style_img_preprocessed = image_utils.resize_image(self.style_img_ph,
style_image_size)
        # Defines the placeholder for the content image.
content_img_ph = tf.placeholder(tf.float32, shape=[None, None, 3])
if content_square_crop:
content_img_preprocessed = image_utils.center_crop_resize_image(
content_img_ph, content_image_size)
else:
content_img_preprocessed = image_utils.resize_image(
content_img_ph, content_image_size)
# Defines the model.
stylized_images, _, _, bottleneck_feat = build_model.build_model(
content_img_preprocessed,
style_img_preprocessed,
trainable=False,
is_training=False,
inception_end_point='Mixed_6e',
style_prediction_bottleneck=100,
adds_losses=False)
checkpoint = tf.train.latest_checkpoint(checkpoint)
init_fn = slim.assign_from_checkpoint_fn(checkpoint, slim.get_variables_to_restore())
sess.run([tf.local_variables_initializer()])
init_fn(sess)
self.sess = sess
self.stylized_images = stylized_images
self.content_img_preprocessed = content_img_preprocessed
self.style_img_preprocessed = style_img_preprocessed
self.content_img_ph = content_img_ph
self.bottleneck_feat = bottleneck_feat
    def process_data(self, style_images_paths, content_images_paths):
        # Gets the lists of input images.
        style_img_list = tf.gfile.Glob(style_images_paths)
        content_img_list = tf.gfile.Glob(content_images_paths)
        # Note: the loops below overwrite the instance attributes on every
        # iteration, so only the last (content, style) pair is kept for run().
for content_i, content_img_path in enumerate(content_img_list):
content_img_np = image_utils.load_np_image_uint8(content_img_path)[:, :, :3]
content_img_name = os.path.basename(content_img_path)[:-4]
# Saves preprocessed content image.
inp_img_croped_resized_np = self.sess.run(
self.content_img_preprocessed, feed_dict={
self.content_img_ph: content_img_np})
# Computes bottleneck features of the style prediction network for the
# identity transform.
identity_params = self.sess.run(
self.bottleneck_feat, feed_dict={self.style_img_ph: content_img_np})
for style_i, style_img_path in enumerate(style_img_list):
style_img_name = os.path.basename(style_img_path)[:-4]
style_image_np = image_utils.load_np_image_uint8(style_img_path)[:, :, :3]
self.content_img_np = content_img_np
self.style_image_np = style_image_np
self.identity_params = identity_params
self.style_img_name = style_img_name
self.content_img_name = content_img_name
def run(self, output_dir, interpolation_weights):
style_params = self.sess.run(
self.bottleneck_feat, feed_dict={self.style_img_ph: self.style_image_np})
for interp_i, wi in enumerate(interpolation_weights):
stylized_image_res = self.sess.run(
self.stylized_images,
feed_dict={
self.bottleneck_feat:
self.identity_params * (1 - wi) + style_params * wi,
self.content_img_ph:
self.content_img_np
})
# Saves stylized image.
image_utils.save_np_image(
stylized_image_res,
os.path.join(output_dir, '%s_stylized_%s_%d.jpg' % \
(self.content_img_name, self.style_img_name, interp_i)))
magenta_model = Magenta_Model("/mnt/disks/ssd_disk/final/models/",
content_square_crop=False, style_square_crop=False,
style_image_size=256, content_image_size=256)
magenta_model.process_data(style_images_paths="/mnt/disks/ssd_disk/final/data/content_images/*",
content_images_paths="/mnt/disks/ssd_disk/final/data/content_images/*")
magenta_model.run("/mnt/disks/ssd_disk/final/tmp/", [0., 1.])
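# The interpolation weights blend the style-prediction bottleneck features:
# w = 0 feeds back the content image's own ("identity") features, so the
# output approximates the unstylized content, while w = 1 applies the full
# target style; the [0., 1.] call above therefore renders both endpoints.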
avg_line_length: 39.88 | max_line_length: 109 | alphanum_fraction: 0.664995 | count_classes: 4227 | score_classes: 0.847944 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 530 | score_documentation: 0.106319