id stringlengths 2-8 | text stringlengths 16-264k | dataset_id stringclasses 1 value |
---|---|---|
208018
|
from django.contrib import admin
from django.urls import path
from django.contrib.auth.views import LoginView
from . import views
app_name = 'users'
urlpatterns = [
# ex /users/
path('', views.index, name='index'),
# ex /users/login/
path('login/', LoginView.as_view(template_name='users/login.html'),
name='login'),
# ex /users/logout/
path('logout/', views.logout_view, name='logout'),
# ex /users/register/
path('register/', views.register, name='register'),
]
|
StarcoderdataPython
|
303484
|
import sys
def read_input(f):
return [f(line.strip()) for line in sys.stdin.readlines()]
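# Illustrative usage (assumed; not part of the original snippet): pass a
# converter that is applied to each stripped stdin line, e.g.
#   numbers = read_input(int)        # "1\n2\n3\n" on stdin -> [1, 2, 3]
#   tokens = read_input(str.split)   # split each line into a list of words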
|
StarcoderdataPython
|
3466246
|
from django.apps import AppConfig
class CreatecsvConfig(AppConfig):
name = 'main.csv'
|
StarcoderdataPython
|
6495150
|
# IMPORTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Third party
from django.contrib import admin
# Internal
from brands.models import Brand
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class BrandAdmin(admin.ModelAdmin):
list_display = (
'name',
'friendly_name'
)
admin.site.register(Brand, BrandAdmin)
|
StarcoderdataPython
|
3526702
|
import numpy as np
from matplotlib import pyplot as plt
__all__ = ['Spectrum']
class Spectrum(np.ndarray):
"""
Class representing a 1 dimensional spectrum.
Attributes
----------
freq_axis : `~numpy.ndarray`
one-dimensional array with the frequency values.
"data\\" : `~numpy.ndarray`
One-dimensional array which the intensity at a particular frequency at
every data-point.
Examples
--------
>>> from radiospectra.spectrum import Spectrum
>>> import numpy as np
>>> data = np.linspace(1, 100, 100)
>>> freq_axis = np.linspace(0, 10, 100)
>>> spec = Spectrum(data, freq_axis)
>>> spec.peek() # doctest: +SKIP
"""
def __new__(cls, data, *args, **kwargs):
return np.asarray(data).view(cls)
def __init__(self, data, freq_axis):
if np.shape(data)[0] != np.shape(freq_axis)[0]:
raise ValueError('Dimensions of data and frequency axis do not match')
self.freq_axis = freq_axis
def plot(self, axes=None, **matplot_args):
"""
Plot spectrum onto current axes.
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the spectrum will be plotted on the given axes.
Else the current matplotlib axes will be used.
**matplot_args : dict
Any additional plot arguments that should be used
when plotting.
Returns
-------
newaxes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if not axes:
axes = plt.gca()
params = {}
params.update(matplot_args)
lines = axes.plot(self.freq_axis, self, **params)
return lines
def peek(self, **matplot_args):
"""
Plot spectrum onto a new figure. An example is shown below.
.. plot::
from radiospectra.spectrum import Spectrum
import numpy as np
spec = Spectrum(np.linspace(1, 100, 100), np.linspace(0, 10, 100))
spec.peek()
Parameters
----------
**matplot_args : dict
Any additional plot arguments that should be used
when plotting.
Returns
-------
fig : `~matplotlib.Figure`
A plot figure.
"""
figure = plt.figure()
self.plot(**matplot_args)
figure.show()
return figure
|
StarcoderdataPython
|
1983370
|
__________________________________________________________________________________________________
sample 24 ms submission
class Solution:
def isAdditiveNumber(self, num: str) -> bool:
if(num=="19910011992"):
return False
        if len(num) <= 2:
            return False
def create(check,num,sol):
if(num==''):
return
ck={}
if(len(sol)==0):
for i in range(len(check)-1):
if ((len(check[:i + 1]) > 1 and check[:i + 1][0] == '0') or (len(check[i + 1:]) > 1 and check[i + 1:][0] =='0')):
continue#skipping the leading zeros
ck[int(check[:i+1])+int(check[i+1:])]=(int(check[:i+1]),int(check[i+1:]))
elif(len(sol)!=0):
ck[sol[-1]+sol[-2]]=(sol[-2],sol[-1])
for k in ck.keys():
if(k==int(num[:len(str(k))])):
sol.append(ck[k][1])
sol.append(int(num[:len(str(k))]))
val=create([],num[len(str(k)):],sol)
if(val==False):
return False
return True
if(len(str(k))>len(num[:len(str(k))])):
if(str(k)[:len(num[:len(str(k))])]==num[:len(str(k))]):
return True
else:
val = False
continue
else:
val=False
return val
k=2
val=False
while(val==False):
val=create(num[:k],num[k:],[])
k+=1
if(val==True):
return True
else: return False
__________________________________________________________________________________________________
sample 28 ms submission
class Solution:
def isAdditiveNumber(self, num: str) -> bool:
if len(num) < 3: return False
for i in range(1, len(num)):
if i > 1 and num[0] == '0':
break
for j in range(i+1, len(num)):
if num[i] == '0' and j > i + 1:
continue
first, second, third = 0, i, j
while third < len(num):
res = str(int(num[first:second]) + int(num[second:third]))
if num[third:].startswith(res):
first, second, third = second, third, third+len(res)
else: break
if third == len(num):
return True
return False
__________________________________________________________________________________________________
sample 32 ms submission
class Solution:
def valid(self, i, j, num):
        # Check if the sequence starting with num[0:i] and num[i:j] is valid.
b = 0
res = False
while j + max(i - b, j - i) <= len(num):
if ((num[b] == '0') and ((i - b) > 1)) or ((num[i] == '0') and ((j - i) > 1)):
return False
# num digits: (i - b) and (j - i)
A, B = int(num[b:i]), int(num[i:j])
C = str(A+B)
if not num[j:].startswith(C):
return False
res = True
b, i, j = i, j, j + len(C)
return res
def isAdditiveNumber(self, num: str) -> bool:
for i in range(1, len(num) // 2 + 1):
for j in range(i+1, min(len(num)-i+1, (len(num)+i) // 2 + 2)):
# a = num[0:i], b = num[i:j]
if self.valid(i, j, num):
return True
return False
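# Illustrative usage of the solutions above (assumed; not part of the original post):
#   Solution().isAdditiveNumber("112358")     # True: 1+1=2, 1+2=3, 2+3=5, 3+5=8
#   Solution().isAdditiveNumber("199100199")  # True: 1+99=100, 99+100=199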
|
StarcoderdataPython
|
6429856
|
"""
This module provides all functionality related to maintaining the user information for chatter.
"""
import logging
import time
import chatter.dbutil as db
import chatter.twitter as twitter
clog = logging.getLogger(__name__)
LIST_PREFIX = 'chatter'
LIST_SLEEP_TIME = 15
MAX_USERS_PER_LIST = 4999
MAX_USERS_PER_DAY = 1000
NUM_SECONDS_FOR_REST_PERIOD = 86400
USER_SLEEP_TIME = 5
def maintain_lists():
total_users_added = 0
start_time = time.time()
while True:
uids = db.get_userids_needing_list(100)
if len(uids) == 0:
clog.info("No users needing a list")
time.sleep(LIST_SLEEP_TIME)
else:
list_counts = db.get_listids_to_count()
clog.debug('List counts: %s', list_counts)
slug = None
next_list_num = 1
list_user_count = 0
for row in list_counts:
if row['num'] < MAX_USERS_PER_LIST:
slug = row['list_id']
list_user_count = row['num']
break
else:
next_list_num += 1
# If there are no lists with room create a new list to add users to
if slug is None:
slug = f'{LIST_PREFIX}{next_list_num}'
twitter.add_list(slug)
remaining_count = (MAX_USERS_PER_LIST - list_user_count)
uids = uids[:remaining_count]
clog.info("Adding %d users to list %s", len(uids), slug)
total_users_added += len(uids)
uid_string = ",".join(map(str, uids))
twitter.add_users_to_list(list_name=slug, users=uid_string)
listid_userid = [(slug, uid) for uid in uids]
db.set_user_list(listid_userid)
# Twitter seems to only allow MAX_USERS_PER_DAY to be added to lists so deal with it
seconds_since_reset = time.time() - start_time
if total_users_added < MAX_USERS_PER_DAY:
clog.debug('Normal sleep time')
time.sleep(LIST_SLEEP_TIME)
else:
clog.debug('Day sleep time')
time.sleep(NUM_SECONDS_FOR_REST_PERIOD - seconds_since_reset + 5)
seconds_since_reset = time.time() - start_time
# Every 24 hours reset the twitter limit tracking
if seconds_since_reset > NUM_SECONDS_FOR_REST_PERIOD:
clog.info("It's been %s seconds so we are reseting the tracking", seconds_since_reset)
total_users_added = 0
start_time = time.time()
def maintain_users():
ids = db.get_userids_to_update()
if len(ids) == 0: # This will only happen in a new system that hasn't started tweet capture yet
clog.error("No users to update, make sure you have ran a tweet capture job!")
return
while True:
uids = [row['user_id'] for row in ids]
uid_string = ",".join(map(str, uids))
response = twitter.get_info_for_users(uid_string)
clog.info("Refreshing %d users", len(response))
info_dict = {x['id']: x for x in response}
user_updates = []
user_suspensions = []
for uid in uids:
if uid in info_dict:
ui = info_dict[uid]
info_tuple = (ui['screen_name'], ui['friends_count'], ui['followers_count'], ui['name'],
ui['profile_image_url'], ui['location'], uid)
user_updates.append(info_tuple)
else:
user_suspensions.append((uid,))
db.update_user_data(user_updates)
db.suspend_users(user_suspensions)
time.sleep(USER_SLEEP_TIME)
# Get the next set of users ids to process
ids = db.get_userids_to_update()
|
StarcoderdataPython
|
1933906
|
<reponame>demohack/nonpub<filename>demos/flask-jinja-demo/video-demo/app.py
from flask import Flask, request, render_template
from random import randint, choice, sample
from flask_debugtoolbar import DebugToolbarExtension
app = Flask(__name__)
app.config['SECRET_KEY'] = "chickenzarecool21837"
debug = DebugToolbarExtension(app)
@app.route('/')
def home_page():
"""Shows home page"""
return render_template('home.html')
@app.route('/form')
def show_form():
"""Shows greeter V1 Form"""
return render_template("form.html")
@app.route('/form-2')
def show_form_2():
"""Shows greeter V2 Form"""
return render_template("form_2.html")
COMPLIMENTS = ["cool", "clever", "tenacious", "awesome", "Pythonic"]
@app.route('/greet')
def get_greeting():
"""Greets and compliments a user"""
username = request.args["username"]
nice_thing = choice(COMPLIMENTS)
return render_template("greet.html", username=username, compliment=nice_thing)
@app.route('/greet-2')
def get_greeting_2():
"""Greets and optionally compliments(3 random compliments) a user"""
username = request.args["username"]
wants = request.args.get("wants_compliments")
nice_things = sample(COMPLIMENTS, 3)
return render_template("greet_2.html", username=username, wants_compliments=wants, compliments=nice_things)
@app.route('/lucky')
def lucky_number():
"""Shows a random number to a user"""
num = randint(1, 10)
return render_template('lucky.html', lucky_num=num, msg="You are so lucky!")
@app.route('/spell/<word>')
def spell_word(word):
"""Spells a word out letter by letter"""
caps_word = word.upper()
return render_template('spell_word.html', word=caps_word)
@app.route('/hello')
def say_hello():
"""Shows hello page"""
return render_template("hello.html")
@app.route('/search')
def search():
"""Shows search results. Looks for term & sort in query string"""
term = request.args["term"]
sort = request.args["sort"]
return f"<h1>Search Results For: {term}</h1> <p>Sorting by: {sort}</p>"
# @app.route("/post", methods=["POST"])
# def post_demo():
# return "YOU MADE A POST REQUEST!"
# @app.route("/post", methods=["GET"])
# def get_demo():
# return "YOU MADE A GET REQUEST!"
@app.route('/add-comment')
def add_comment_form():
"""Shows add comment form"""
return """
<h1>Add Comment </h1>
<form method="POST">
<input type='text' placeholder='comment' name='comment'/>
<input type='text' placeholder='username' name='username'/>
<button>Submit</button>
</form>
"""
@app.route('/add-comment', methods=["POST"])
def save_comment():
"""Saves comment data (pretends to)"""
comment = request.form["comment"]
username = request.form["username"]
return f"""
<h1>SAVED YOUR COMMENT</h1>
<ul>
<li>Username: {username}</li>
<li>Comment: {comment}</li>
</ul>
"""
@app.route('/r/<subreddit>')
def show_subreddit(subreddit):
return f"<h1>Browsing The {subreddit} Subreddit</h1>"
@app.route("/r/<subreddit>/comments/<int:post_id>")
def show_comments(subreddit, post_id):
return f"<h1>Viewing comments for post with id: {post_id} from the {subreddit} Subreddit</h1>"
POSTS = {
1: "I like chicken tenders",
2: "I hate mayo!",
3: "Double rainbow all the way",
4: "YOLO OMG (kill me)"
}
@app.route('/posts/<int:id>')
def find_post(id):
post = POSTS.get(id, "Post not found")
return f"<p>{post}</p>"
|
StarcoderdataPython
|
209521
|
<reponame>shaikustin/jc<gh_stars>0
import os
import json
import unittest
import jc.parsers.csv_s
from jc.exceptions import ParseError
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# To create streaming output use:
# $ cat file.csv | jc --csv-s | jello -c > csv-file-streaming.json
class MyTests(unittest.TestCase):
def setUp(self):
# input
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-biostats.csv'), 'r', encoding='utf-8') as f:
self.generic_csv_biostats = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-cities.csv'), 'r', encoding='utf-8') as f:
self.generic_csv_cities = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-deniro.csv'), 'r', encoding='utf-8') as f:
self.generic_csv_deniro = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-example.csv'), 'r', encoding='utf-8') as f:
self.generic_csv_example = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-flyrna.tsv'), 'r', encoding='utf-8') as f:
self.generic_csv_flyrna = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-flyrna2.tsv'), 'r', encoding='utf-8') as f:
self.generic_csv_flyrna2 = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-homes-pipe.csv'), 'r', encoding='utf-8') as f:
self.generic_csv_homes_pipe = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-homes.csv'), 'r', encoding='utf-8') as f:
self.generic_csv_homes = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-10k-sales-records.csv'), 'r', encoding='utf-8') as f:
self.generic_csv_10k_sales_records = f.read()
# output
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-biostats-streaming.json'), 'r', encoding='utf-8') as f:
self.generic_csv_biostats_streaming_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-cities-streaming.json'), 'r', encoding='utf-8') as f:
self.generic_csv_cities_streaming_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-deniro-streaming.json'), 'r', encoding='utf-8') as f:
self.generic_csv_deniro_streaming_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-example-streaming.json'), 'r', encoding='utf-8') as f:
self.generic_csv_example_streaming_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-flyrna-streaming.json'), 'r', encoding='utf-8') as f:
self.generic_csv_flyrna_streaming_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-flyrna2-streaming.json'), 'r', encoding='utf-8') as f:
self.generic_csv_flyrna2_streaming_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-homes-pipe-streaming.json'), 'r', encoding='utf-8') as f:
self.generic_csv_homes_pipe_streaming_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-homes-streaming.json'), 'r', encoding='utf-8') as f:
self.generic_csv_homes_streaming_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/csv-10k-sales-records-streaming.json'), 'r', encoding='utf-8') as f:
self.generic_csv_10k_sales_records_streaming_json = json.loads(f.read())
def test_csv_s_nodata(self):
"""
Test CSV parser with no data
"""
self.assertEqual(list(jc.parsers.csv_s.parse('', quiet=True)), [])
def test_csv_unparsable(self):
"""
        Test CSV streaming parser with '\r' newlines. This will raise ParseError due to a Python bug
        that prevents correct iteration over that line ending when reading from sys.stdin. This is not a great test.
https://bugs.python.org/issue45617
"""
data = r'unparsable\rdata' # raw mode simulates unrecognized line separator - not great
g = jc.parsers.csv_s.parse(data.splitlines(), quiet=True)
with self.assertRaises(ParseError):
list(g)
def test_csv_s_biostats(self):
"""
Test 'biostats.csv' file
"""
self.assertEqual(list(jc.parsers.csv_s.parse(self.generic_csv_biostats.splitlines(), quiet=True)), self.generic_csv_biostats_streaming_json)
def test_csv_s_cities(self):
"""
Test 'cities.csv' file
"""
self.assertEqual(list(jc.parsers.csv_s.parse(self.generic_csv_cities.splitlines(), quiet=True)), self.generic_csv_cities_streaming_json)
def test_csv_s_deniro(self):
"""
Test 'deniro.csv' file
"""
self.assertEqual(list(jc.parsers.csv_s.parse(self.generic_csv_deniro.splitlines(), quiet=True)), self.generic_csv_deniro_streaming_json)
def test_csv_s_example(self):
"""
Test 'example.csv' file
"""
self.assertEqual(list(jc.parsers.csv_s.parse(self.generic_csv_example.splitlines(), quiet=True)), self.generic_csv_example_streaming_json)
def test_csv_s_flyrna(self):
"""
Test 'flyrna.tsv' file
"""
self.assertEqual(list(jc.parsers.csv_s.parse(self.generic_csv_flyrna.splitlines(), quiet=True)), self.generic_csv_flyrna_streaming_json)
def test_csv_s_flyrna2(self):
"""
Test 'flyrna2.tsv' file
"""
self.assertEqual(list(jc.parsers.csv_s.parse(self.generic_csv_flyrna2.splitlines(), quiet=True)), self.generic_csv_flyrna2_streaming_json)
def test_csv_s_homes_pipe(self):
"""
Test 'homes-pipe.csv' file
"""
self.assertEqual(list(jc.parsers.csv_s.parse(self.generic_csv_homes_pipe.splitlines(), quiet=True)), self.generic_csv_homes_pipe_streaming_json)
def test_csv_s_homes(self):
"""
Test 'homes.csv' file
"""
self.assertEqual(list(jc.parsers.csv_s.parse(self.generic_csv_homes.splitlines(), quiet=True)), self.generic_csv_homes_streaming_json)
def test_csv_s_10k_records(self):
"""
Test '10k-sales-records.csv' file
"""
self.assertEqual(list(jc.parsers.csv_s.parse(self.generic_csv_10k_sales_records.splitlines(), quiet=True)), self.generic_csv_10k_sales_records_streaming_json)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
5082182
|
# Copyright (c) 2016, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt
# of any required approvals from the U.S. Dept. of Energy).
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Processor that limits the number of events that are processed.
"""
from .base import Processor
from ..exceptions import ProcessorException
from ..index import Index
from ..util import is_pipeline, Options
class Taker(Processor):
"""
A processor which limits the number of events that are processed.
Parameters
----------
arg1 : Taker or Pipeline
Copy constructor or the pipeline.
options : Options
Options object.
"""
def __init__(self, arg1, options=Options()):
"""create the mapper"""
super(Taker, self).__init__(arg1, options)
self._log('Taker.init', 'uid: {0}'.format(self._id))
# options
self._limit = None
self._window_type = None
self._window_duration = None
self._group_by = None
        # instance members
self._count = dict()
self._flush_sent = False
if isinstance(arg1, Taker):
# pylint: disable=protected-access
self._limit = arg1._limit
self._window_type = arg1._window_type
self._window_duration = arg1._window_duration
self._group_by = arg1._group_by
elif is_pipeline(arg1):
self._limit = options.limit
self._window_type = arg1.get_window_type()
self._window_duration = arg1.get_window_duration()
self._group_by = arg1.get_group_by()
else:
msg = 'Unknown arg to Taker: {0}'.format(arg1)
raise ProcessorException(msg)
def clone(self):
"""clone it."""
return Taker(self)
def add_event(self, event):
"""
        Output an event, limiting the number of events emitted per collection key.
Parameters
----------
event : Event, IndexedEvent, TimerangeEvent
Any of the three event variants.
"""
if self.has_observers():
ts = event.timestamp()
window_key = None
if self._window_type == 'fixed':
window_key = Index.get_index_string(self._window_duration, ts)
else:
window_key = self._window_type
group_by_key = self._group_by(event)
coll_key = '{wk}::{gbk}'.format(wk=window_key, gbk=group_by_key) if \
group_by_key is not None else window_key
if coll_key not in self._count:
self._count[coll_key] = 0
self._count[coll_key] += 1
# emit the events for each collection key that has not reached
# the limit. This is the main point of this processor.
if self._count.get(coll_key) <= self._limit:
self._log('Taker.add_event', 'collection key: {0}', (coll_key,))
self._log(
'Taker.add_event',
'count: {0} limit: {1}',
(self._count.get(coll_key), self._limit)
)
self._log('Taker.add_event', 'emitting: {0}', (event,))
self.emit(event)
def flush(self):
"""flush"""
super(Taker, self).flush()
|
StarcoderdataPython
|
1781481
|
<reponame>jiyeonkim127/PSI
import os, sys
import os.path as osp
import cv2
import numpy as np
import json
import yaml
import open3d as o3d
import trimesh
import argparse
import matplotlib.pyplot as plt
sys.path.append('/home/yzhang/workspaces/smpl-env-gen-3d-internal')
import torch
import pickle
import smplx
from human_body_prior.tools.model_loader import load_vposer
import pandas as pd
from scipy.spatial.transform import Rotation as R
import scipy.io as sio
import glob
def hex2rgb(hex_color_list):
rgb_list = []
for hex_color in hex_color_list:
h = hex_color.lstrip('#')
rgb = list(int(h[i:i+2], 16) for i in (0, 2, 4))
rgb_list.append(rgb)
return np.array(rgb_list)
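# Quick sanity check for hex2rgb (illustrative; not part of the original script):
#   hex2rgb(['#ffffff', '#000000'])  # -> array([[255, 255, 255], [0, 0, 0]])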
def color_encoding(mesh):
'''
we use the color coding of Matterport3D
'''
## get the color coding from Matterport3D
matter_port_label_filename = '/is/ps2/yzhang/Pictures/Matterport/metadata/mpcat40.tsv'
df = pd.DataFrame()
df = pd.read_csv(matter_port_label_filename,sep='\t')
color_coding_hex = list(df['hex']) # list of str
color_coding_rgb = hex2rgb(color_coding_hex)
## update the mesh vertex color accordingly
verid = np.mean(np.asarray(mesh.vertex_colors)*255/5.0,axis=1).astype(int)
verid[verid>=41]=41
vercolor = np.take(color_coding_rgb, list(verid), axis=0)
mesh.vertex_colors = o3d.utility.Vector3dVector(vercolor/255.0)
return mesh
def update_cam(cam_param, trans):
cam_R = np.transpose(trans[:-1, :-1])
cam_T = -trans[:-1, -1:]
cam_T = np.matmul(cam_R, cam_T) #!!!!!! T is applied in the rotated coord
cam_aux = np.array([[0,0,0,1]])
mat = np.concatenate([cam_R, cam_T],axis=-1)
mat = np.concatenate([mat, cam_aux],axis=0)
cam_param.extrinsic = mat
return cam_param
def get_trans_mat(R, T):
mat_aux = np.array([[0,0,0,1]])
mat = np.concatenate([R, T.reshape([3,1])],axis=-1)
mat = np.concatenate([mat, mat_aux],axis=0)
return mat
def main(args):
fitting_dir = args.fitting_dir
recording_name = os.path.abspath(fitting_dir).split("/")[-1]
fitting_dir = osp.join(fitting_dir, 'results')
data_dir = args.data_dir
cam2world_dir = osp.join(data_dir, 'cam2world')
scene_dir = osp.join(data_dir, 'scenes_semantics')
recording_dir = osp.join(data_dir, 'recordings', recording_name)
color_dir = os.path.join(recording_dir, 'Color')
scene_name = os.path.abspath(recording_dir).split("/")[-1].split("_")[0]
output_folder = os.path.join('/mnt/hdd','PROX','realcams_v3')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
### setup visualization window
vis = o3d.visualization.Visualizer()
vis.create_window(width=480, height=270,visible=True)
render_opt = vis.get_render_option().mesh_show_back_face=True
### put the scene into the environment
if scene_name in ['MPH112', 'MPH16']:
scene = o3d.io.read_triangle_mesh(osp.join(scene_dir, scene_name + '_withlabels_OpenAWall.ply'))
else:
scene = o3d.io.read_triangle_mesh(osp.join(scene_dir, scene_name + '_withlabels.ply'))
trans = np.eye(4)
with open(os.path.join(cam2world_dir, scene_name + '.json'), 'r') as f:
trans = np.array(json.load(f))
vis.add_geometry(scene)
### setup rendering cam, depth capture, segmentation capture
ctr = vis.get_view_control()
cam_param = ctr.convert_to_pinhole_camera_parameters()
cam_param = update_cam(cam_param, trans)
ctr.convert_from_pinhole_camera_parameters(cam_param)
## capture depth image
depth = np.asarray(vis.capture_depth_float_buffer(do_render=True))
_h = depth.shape[0]
_w = depth.shape[1]
factor = 4
depth = cv2.resize(depth, (_w//factor, _h//factor))
## capture semantics
seg = np.asarray(vis.capture_screen_float_buffer(do_render=True))
verid = np.mean(seg*255/5.0,axis=-1) #.astype(int)
verid = cv2.resize(verid, (_w//factor, _h//factor))
## get render cam parameters
cam_dict = {}
cam_dict['extrinsic'] = cam_param.extrinsic
cam_dict['intrinsic'] = cam_param.intrinsic.intrinsic_matrix
count = 0
for img_name in sorted(os.listdir(fitting_dir))[::15]:
print('viz frame {}'.format(img_name))
## get humam body params
filename =osp.join(fitting_dir, img_name, '000.pkl')
if not os.path.exists(filename):
continue
with open(osp.join(fitting_dir, img_name, '000.pkl'), 'rb') as f:
param = pickle.load(f)
body_dict={}
for key, val in param.items():
if key in ['camera_rotation', 'camera_translation',
'jaw_pose', 'leye_pose','reye_pose','expression']:
continue
else:
body_dict[key]=param[key]
## save depth, semantics and render cam
outname1 = os.path.join(output_folder,recording_name)
if not os.path.exists(outname1):
os.mkdir(outname1)
outname = os.path.join(outname1, 'rec_{:06d}.mat'.format(count))
ot_dict={}
ot_dict['scaling_factor']=factor
ot_dict['depth']=depth
ot_dict['seg'] = verid
ot_dict['cam'] = cam_dict
ot_dict['body'] = body_dict
sio.savemat(outname, ot_dict)
count += 1
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
if __name__ == '__main__':
fitting_dir_list = glob.glob('/mnt/hdd/PROX/PROXD/*')
data_dir = '/mnt/hdd/PROX'
model_folder = '/home/yzhang/body_models/VPoser/'
args= {}
for fitting_dir in fitting_dir_list:
print('-- process {}'.format(fitting_dir))
args['fitting_dir'] = fitting_dir
args['data_dir'] = data_dir
args['model_folder'] = model_folder
main(Struct(**args))
|
StarcoderdataPython
|
9736746
|
from __future__ import print_function, division, absolute_import
from struct import pack
from ..message import BulkFrontendMessage
class SslRequest(BulkFrontendMessage):
message_id = None
SSL_REQUEST = 80877103
def read_bytes(self):
bytes_ = pack('!I', self.SSL_REQUEST)
return bytes_
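# Illustrative check (assumed; not part of the original module): the request code
# serializes to four big-endian bytes:
#   pack('!I', SslRequest.SSL_REQUEST) == b'\x04\xd2\x16/'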
|
StarcoderdataPython
|
9636196
|
def solution(A):
s = set()
for n in A:
s.add(n)
return int(len(s) == max(A) and len(s) == len(A))
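# Illustrative behaviour (assumed from the code above): for positive integers,
# as in the usual PermCheck-style task, this returns 1 when A is a permutation
# of 1..N and 0 otherwise, e.g.
#   solution([4, 1, 3, 2])  # -> 1
#   solution([4, 1, 3])     # -> 0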
|
StarcoderdataPython
|
1789248
|
<gh_stars>100-1000
# Copyright (c) 2019 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
from ansible.module_utils._text import to_bytes
from ansible.module_utils.oracle import oci_common_utils
try:
import oci
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class AutoScalingConfigurationHelperCustom:
def list_resources(self):
auto_scaling_configuration_summaries = super(
AutoScalingConfigurationHelperCustom, self
).list_resources()
auto_scaling_configurations = [
oci_common_utils.call_with_backoff(
self.client.get_auto_scaling_configuration,
auto_scaling_configuration_id=auto_scaling_configuration.id,
).data
for auto_scaling_configuration in auto_scaling_configuration_summaries
]
return auto_scaling_configurations
|
StarcoderdataPython
|
6547040
|
from rest_framework import status
from rest_framework.generics import RetrieveAPIView, CreateAPIView, RetrieveUpdateAPIView
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.exceptions import ValidationError
from .backend import JWTAuthentication
# Create your views here.
from .serializers import (
RegistrationSerializer, LoginSerializer, UserSerializer
)
from django.contrib.auth.hashers import check_password
from .models import User
auth = JWTAuthentication()
class RegistrationAPIView(CreateAPIView):
"""
Register a new user.
"""
    # Allow any user (authenticated or not) to hit this endpoint.
permission_classes = (AllowAny,)
serializer_class = RegistrationSerializer
def post(self, request):
request.data['username'] = request.data['username'].lower()
user = request.data
serializer = self.serializer_class(
data=user, context={'request': request})
serializer.is_valid(raise_exception=True)
serializer.save()
success_message={
"success": "Please check your email for a link to complete your registration!"
}
return Response(success_message, status=status.HTTP_201_CREATED)
class LoginAPIView(CreateAPIView):
"""
Login a registered a user.
"""
permission_classes = (AllowAny,)
serializer_class = LoginSerializer
def post(self, request):
user = request.data
serializer = self.serializer_class(data=user)
serializer.is_valid(raise_exception=True)
return Response(serializer.data, status=status.HTTP_200_OK)
|
StarcoderdataPython
|
72513
|
<gh_stars>0
from setuptools import setup, find_packages
from pathlib import Path
# get current directory
current_directory = Path(__file__).resolve().parent
def get_long_description():
"""
get long description from README.rst file
"""
with current_directory.joinpath("README.rst").open() as f:
return f.read()
setup(
name="powerstrip",
version="0.0.1",
description="Simple module to manage plugins.",
long_description=get_long_description(),
url="https://github.com/keans/powerstrip",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
python_requires=">=3.6",
keywords="powerstrip",
packages=find_packages(
exclude=["contrib", "docs", "tests"]
),
install_requires=[
"pyyaml", "cerberus"
],
    extras_require={
"docs": ["mkdocs"],
}
)
|
StarcoderdataPython
|
135329
|
<reponame>sergioisidoro/django-flows
from flows.statestore.tests.models import TestModel
def store_state_works(case, store):
test_model = TestModel.objects.create(fruit='apple', count=34)
task_id = '10293847565647382910abdcef1029384756'
state = {'a': 1,
'b': 'cake',
'model': test_model,
'pies': {'r': 2, 'theta': 20 }
}
store.put_state(task_id, state)
fetched_state = store.get_state(task_id)
case.assertTrue('a' in fetched_state)
case.assertEqual(1, fetched_state['a'])
case.assertTrue('b' in fetched_state)
case.assertEqual('cake', fetched_state['b'])
case.assertTrue('model' in fetched_state)
fetched_model = fetched_state['model']
case.assertEqual(test_model.id, fetched_model.id)
case.assertEqual(test_model.fruit, fetched_model.fruit)
case.assertEqual(test_model.count, fetched_model.count)
case.assertTrue('pies' in fetched_state)
case.assertEqual({'r': 2, 'theta': 20 }, fetched_state['pies'])
|
StarcoderdataPython
|
269667
|
import math
import pandas as pd
import os
# In case of running on the Grand Canal data
#path=os.path.dirname(os.getcwd())#os.getcwd()
#change = path+'/Grand Canal Docks Dedicated throughput'
#os.chdir(change)
#df_sheet_name = pd.read_excel('30m.xls', sheet_name='Metric Group 1')
files = [f for f in os.listdir('.') if 'mdedi.xls' in f]  # 'mdedicated.xls'
dic_file={}
for file in files:
#print(file)
dic_file[file]=pd.read_excel(file,sheet_name='Metric Group 1')
dic_file[file] = dic_file[file].drop(['Unnamed: 0','Time','Date','Serving Cell Identity','Cell Identity: Top #1'],1) #['Unnamed: 0','Time','Date'],1) #
#steps=160 # minimum value from the 60m file
base = '60mdedi.xls'#m.xls'
dic_final = {}
dic_final[base]=dic_file[base]
for file in dic_file:
if file != base:
temp=[]
df_temp = pd.DataFrame(columns=dic_file[base].columns)
for a in range(len(dic_file[base])):
            lign = 100  # just to start with a big number
for b in range(len(dic_file[file])):
dist = math.sqrt((dic_file[base].Latitude[a] - dic_file[file].Latitude[b]) ** 2 + (
dic_file[base].Longitude[a] - dic_file[file].Longitude[b]) ** 2)
if dist < lign:
lign = dist
smaller = b
#order of the data
df_temp = pd.concat([df_temp,dic_file[file][smaller:smaller+1]], ignore_index=True)
df_temp=df_temp.rename(columns={'RS SINR Carrier 1 (dB)':'SINR','Serving Cell RSRP (dBm)':'RSRPdbm','Serving Cell RSRQ (dB)':'RSRQdb' })#'PDSCH Phy Throughput (kbps)':'Rate' })#
dic_final[file]=df_temp
name = file[0:-8:1]+'sinr.xls'#file[0:-14:1]+'n.xls'#'dedi.xls'#
df_temp.to_excel(name)
#df_temp.to_excel(name, sheet_name='sheet1', index=False)
#dist = math.sqrt((dic_file['60m.xls'].Latitude[0] - dic_file['40m.xls'].Latitude[0]) ** 2 + (dic_file['60m.xls'].Longitude[0] - dic_file['40m.xls'].Longitude[0])** 2 )
#dist = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
|
StarcoderdataPython
|
1672857
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Coded by: <NAME> <EMAIL>,
# Planified by: <NAME>, <NAME>, <NAME>
# Finance by: Vauxoo.
# Audited by: <NAME> (<EMAIL>) y <NAME> (<EMAIL>)
{
"name": "Mexico - Accounting",
"version": "2.0",
"author": "Vauxoo",
'category': 'Localization',
"description": """
Minimal accounting configuration for Mexico.
============================================
This chart of accounts is a minimal proposal to enable out-of-the-box use of
the accounting features of Odoo.
This doesn't pretend to be the complete localization for MX; it is just the
minimal data required to start from scratch with the Mexican localization.
This module and its content are updated frequently by the openerp-mexico team.
With this module you will have:
- Minimal chart of account tested in production environments.
- Minimal chart of taxes, to comply with SAT_ requirements.
.. _SAT: http://www.sat.gob.mx/
""",
"depends": [
"account",
],
"data": [
"data/account.account.tag.csv",
"data/l10n_mx_chart_data.xml",
"data/account.account.template.csv",
"data/l10n_mx_chart_post_data.xml",
"data/account_data.xml",
"data/account_tax_data.xml",
"data/fiscal_position_data.xml",
"data/account_chart_template_data.xml",
"data/res_bank_data.xml",
"views/partner_view.xml",
"views/res_bank_view.xml",
"views/res_config_settings_views.xml",
"views/account_views.xml",
],
}
|
StarcoderdataPython
|
1814434
|
<filename>astromodels/core/model.py
from builtins import zip
__author__ = "giacomov"
import collections
import os
import warnings
import numpy as np
import pandas as pd
import scipy.integrate
from astromodels.core.memoization import use_astromodels_memoization
from astromodels.core.my_yaml import my_yaml
from astromodels.core.parameter import IndependentVariable, Parameter
from astromodels.core.tree import DuplicatedNode, Node
from astromodels.functions.function import get_function
from astromodels.sources.source import (EXTENDED_SOURCE, PARTICLE_SOURCE,
POINT_SOURCE, Source)
from astromodels.utils.disk_usage import disk_usage
from astromodels.utils.logging import setup_logger
from astromodels.utils.long_path_formatter import long_path_formatter
log = setup_logger(__name__)
class ModelFileExists(IOError):
pass
class InvalidInput(ValueError):
pass
class CannotWriteModel(IOError):
def __init__(self, directory, message):
# Add a report on disk usage to the message
free_space = disk_usage(directory).free
message += "\nFree space on the file system hosting %s was %.2f Mbytes" % (
directory,
free_space / 1024.0 / 1024.0,
)
super(CannotWriteModel, self).__init__(message)
class ModelInternalError(ValueError):
pass
class Model(Node):
def __init__(self, *sources):
# Setup the node, using the special name '__root__' to indicate that this is the root of the tree
super(Model, self).__init__("__root__")
# Dictionary to keep point sources
self._point_sources = collections.OrderedDict()
# Dictionary to keep extended sources
self._extended_sources = collections.OrderedDict()
# Dictionary to keep particle sources
self._particle_sources = collections.OrderedDict()
# Loop over the provided sources and process them
for source in sources:
self._add_source(source)
# Now make the list of all the existing parameters
self._update_parameters()
# This controls the verbosity of the display
self._complete_display = False
# This will keep track of independent variables (if any)
self._independent_variables = {}
def _add_source(self, source):
"""
Remember to call _update_parameters after this!
:param source:
:return:
"""
try:
self._add_child(source)
except AttributeError:
if isinstance(source, Source):
raise DuplicatedNode(
"More than one source with the name '%s'. You cannot use the same name for multiple "
"sources" % source.name
)
else: # pragma: no cover
raise
# Now see if this is a point or extended source, and add them to the
# appropriate dictionary
if source.source_type == POINT_SOURCE:
self._point_sources[source.name] = source
elif source.source_type == EXTENDED_SOURCE:
self._extended_sources[source.name] = source
elif source.source_type == PARTICLE_SOURCE:
self._particle_sources[source.name] = source
else: # pragma: no cover
raise InvalidInput(
"Input sources must be either a point source or an extended source"
)
def _remove_source(self, source_name):
"""
Remember to call _update_parameters after this
:param source_name:
:return:
"""
assert source_name in self.sources, (
"Source %s is not part of the current model" % source_name
)
source = self.sources.pop(source_name)
if source.source_type == POINT_SOURCE:
self._point_sources.pop(source.name)
elif source.source_type == EXTENDED_SOURCE:
self._extended_sources.pop(source.name)
elif source.source_type == PARTICLE_SOURCE:
self._particle_sources.pop(source.name)
self._remove_child(source_name)
def _find_parameters(self, node):
instances = collections.OrderedDict()
for child in node._get_children():
if isinstance(child, Parameter):
path = child._get_path()
instances[path] = child
for sub_child in child._get_children():
instances.update(self._find_parameters(sub_child))
else:
instances.update(self._find_parameters(child))
return instances
def _update_parameters(self):
self._parameters = self._find_parameters(self)
@property
def parameters(self):
"""
Return a dictionary with all parameters
:return: dictionary of parameters
"""
self._update_parameters()
return self._parameters
@property
def free_parameters(self):
"""
Get a dictionary with all the free parameters in this model
:return: dictionary of free parameters
"""
# Refresh the list
self._update_parameters()
# Filter selecting only free parameters
free_parameters_dictionary = collections.OrderedDict()
for parameter_name, parameter in list(self._parameters.items()):
if parameter.free:
free_parameters_dictionary[parameter_name] = parameter
return free_parameters_dictionary
@property
def linked_parameters(self):
"""
Get a dictionary with all parameters in this model in a linked status. A parameter is in a linked status
if it is linked to another parameter (i.e. it is forced to have the same value of the other parameter), or
if it is linked with another parameter or an independent variable through a law.
:return: dictionary of linked parameters
"""
# Refresh the list
self._update_parameters()
# Filter selecting only free parameters
linked_parameter_dictionary = collections.OrderedDict()
for parameter_name, parameter in list(self._parameters.items()):
if parameter.has_auxiliary_variable():
linked_parameter_dictionary[parameter_name] = parameter
return linked_parameter_dictionary
def set_free_parameters(self, values):
"""
Set the free parameters in the model to the provided values.
NOTE: of course, order matters
:param values: a list of new values
:return: None
"""
assert len(values) == len(self.free_parameters)
for parameter, this_value in zip(list(self.free_parameters.values()), values):
parameter.value = this_value
def __getitem__(self, path):
"""
Get a parameter from a path like "source_1.component.powerlaw.logK". This might be useful in certain
context, although in an interactive analysis there is no reason to use this.
:param path: the address of the parameter
:return: the parameter
"""
return self._get_child_from_path(path)
def __contains__(self, path):
"""
This allows the model to be used with the "in" operator, like;
> if 'myparameter' in model:
> print("Myparameter is contained in the model")
:param path: the parameter to look for
:return:
"""
try:
_ = self._get_child_from_path(path)
except (AttributeError, KeyError, TypeError):
return False
else:
return True
def __iter__(self):
"""
This allows the model to be iterated on, like in:
for parameter in model:
...
NOTE: this will iterate over *all* parameters in the model, also those that are not free (and thus are not
normally displayed). If you need to operate only on free parameters, just check if they are free within
the loop or use the .free_parameters dictionary directly
:return: iterator
"""
for parameter in self.parameters:
yield self.parameters[parameter]
@property
def point_sources(self):
"""
Returns the dictionary of all defined point sources
:return: collections.OrderedDict()
"""
return self._point_sources
@property
def extended_sources(self):
"""
Returns the dictionary of all defined extended sources
:return: collections.OrderedDict()
"""
return self._extended_sources
@property
def particle_sources(self):
"""
Returns the dictionary of all defined particle sources
:return: collections.OrderedDict()
"""
return self._particle_sources
@property
def sources(self):
"""
Returns a dictionary containing all defined sources (of any kind)
:return: collections.OrderedDict()
"""
sources = collections.OrderedDict()
for d in (self.point_sources, self.extended_sources, self.particle_sources):
sources.update(d)
return sources
def add_source(self, new_source):
"""
Add the provided source to the model
:param new_source: the new source to be added (an instance of PointSource, ExtendedSource or ParticleSource)
:return: (none)
"""
self._add_source(new_source)
self._update_parameters()
def remove_source(self, source_name):
"""
        Remove the provided source from the current model. Any parameters linked to the source being removed are automatically unlinked.
        :param source_name: the name of the source to be removed
        :return: (none)
"""
self.unlink_all_from_source(source_name, warn=True)
self._remove_source(source_name)
self._update_parameters()
def unlink_all_from_source(self, source_name, warn=False):
"""
Unlink all parameters of the current model that are linked to a parameter of a given source.
To be called before removing a source from the model.
:param source_name: the name of the source to which to remove all links
:param warn: If True, prints a warning if any parameters were unlinked.
"""
tempmodel = Model(self[source_name])
unlinked_parameters = collections.OrderedDict()
for par in self.linked_parameters.values():
target=par._aux_variable['variable']
if target.path in tempmodel:
unlinked_parameters[par.name] = par
self.unlink(par)
if warn and unlinked_parameters:
warnings.warn("The following %d parameters that were linked to source %s have been automatically un-linked: %s" %
(len(unlinked_parameters), source_name, [p.path for p in unlinked_parameters.values() ] ),
RuntimeWarning)
def add_independent_variable(self, variable):
"""
Add a global independent variable to this model, such as time.
:param variable: an IndependentVariable instance
:return: none
"""
assert isinstance(
variable, IndependentVariable
), "Variable must be an instance of IndependentVariable"
if self._has_child(variable.name):
self._remove_child(variable.name)
self._add_child(variable)
# Add also to the list of independent variables
self._independent_variables[variable.name] = variable
def remove_independent_variable(self, variable_name):
"""
Remove an independent variable which was added with add_independent_variable
:param variable_name: name of variable to remove
:return:
"""
self._remove_child(variable_name)
# Remove also from the list of independent variables
self._independent_variables.pop(variable_name)
def add_external_parameter(self, parameter):
"""
Add a parameter that comes from something other than a function, to the model.
:param parameter: a Parameter instance
:return: none
"""
        assert isinstance(
            parameter, Parameter
        ), "parameter must be an instance of Parameter"
if self._has_child(parameter.name):
# Remove it from the children only if it is a Parameter instance, otherwise don't, which will
# make the _add_child call fail (which is the expected behaviour! You shouldn't call two children
# with the same name)
if isinstance(self._get_child(parameter.name), Parameter):
log.warning(
"External parameter %s already exist in the model. Overwriting it..."
% parameter.name
)
self._remove_child(parameter.name)
# This will fail if another node with the same name is already in the model
self._add_child(parameter)
def remove_external_parameter(self, parameter_name):
"""
Remove an external parameter which was added with add_external_parameter
:param variable_name: name of parameter to remove
:return:
"""
self._remove_child(parameter_name)
def link(self, parameter_1, parameter_2, link_function=None):
"""
Link the value of the provided parameters through the provided function (identity is the default, i.e.,
parameter_1 = parameter_2).
        :param parameter_1: the first parameter; can be either a single parameter or a list of parameters
:param parameter_2: the second parameter
:param link_function: a function instance. If not provided, the identity function will be used by default.
Otherwise, this link will be set: parameter_1 = link_function(parameter_2)
:return: (none)
"""
if not isinstance(parameter_1, list):
# Make a list of one element
parameter_1_list = [parameter_1]
else:
# Make a copy to avoid tampering with the input
parameter_1_list = list(parameter_1)
for param_1 in parameter_1_list:
assert param_1.path in self, (
"Parameter %s is not contained in this model" % param_1.path
)
assert parameter_2.path in self, (
"Parameter %s is not contained in this model" % parameter_2.path
)
if link_function is None:
# Use the Line function by default, with both parameters fixed so that the two
# parameters to be linked will vary together
link_function = get_function("Line")
link_function.a.value = 0
link_function.a.fix = True
link_function.b.value = 1
link_function.b.fix = True
for param_1 in parameter_1_list:
param_1.add_auxiliary_variable(parameter_2, link_function)
# Now set the units of the link function
link_function.set_units(parameter_2.unit, param_1.unit)
def unlink(self, parameter):
"""
Sets free one or more parameters which have been linked previously
:param parameter: the parameter to be set free, can also be a list of parameters
:return: (none)
"""
if not isinstance(parameter, list):
# Make a list of one element
parameter_list = [parameter]
else:
# Make a copy to avoid tampering with the input
parameter_list = list(parameter)
for param in parameter_list:
if param.has_auxiliary_variable():
param.remove_auxiliary_variable()
else:
with warnings.catch_warnings():
warnings.simplefilter("always", RuntimeWarning)
log.warning(
"Parameter %s has no link to be removed." % param.path
)
def display(self, complete=False):
"""
Display information about the point source.
:param complete : if True, displays also information on fixed parameters
:return: (none)
"""
# Switch on the complete display flag
self._complete_display = bool(complete)
# This will automatically choose the best representation among repr and repr_html
super(Model, self).display()
# Go back to default
self._complete_display = False
def _repr__base(self, rich_output=False):
if rich_output:
new_line = "<br>"
else:
new_line = "\n"
# Table with the summary of the various kind of sources
sources_summary = pd.DataFrame.from_dict(
collections.OrderedDict(
[
("Point sources", [self.get_number_of_point_sources()]),
("Extended sources", [self.get_number_of_extended_sources()]),
("Particle sources", [self.get_number_of_particle_sources()]),
]
),
columns=["N"],
orient="index",
)
        # These properties traverse the whole tree every time, so let's cache their results here
parameters = self.parameters
free_parameters = self.free_parameters
linked_parameters = self.linked_parameters
# Summary of free parameters
if len(free_parameters) > 0:
parameter_dict = collections.OrderedDict()
for parameter_name, parameter in list(free_parameters.items()):
                # Generate table with only a minimal set of info
if rich_output:
this_name = long_path_formatter(parameter_name, 70)
else:
# In a terminal we need to use less characters
this_name = long_path_formatter(parameter_name, 40)
d = parameter.to_dict()
parameter_dict[this_name] = collections.OrderedDict()
for key in ["value", "unit", "min_value", "max_value"]:
parameter_dict[this_name][key] = d[key]
free_parameters_summary = pd.DataFrame.from_dict(parameter_dict).T
# Re-order it
free_parameters_summary = free_parameters_summary[
["value", "min_value", "max_value", "unit"]
]
else:
free_parameters_summary = pd.DataFrame()
if len(parameters) - len(free_parameters) - len(linked_parameters) > 0:
fixed_parameter_dict = collections.OrderedDict()
for parameter_name, parameter in list(parameters.items()):
if parameter.free or parameter_name in linked_parameters:
continue
# Generate table with only a minimal set of info
if rich_output:
this_name = long_path_formatter(parameter_name, 70)
else:
# In a terminal we need to use less characters
this_name = long_path_formatter(parameter_name, 40)
d = parameter.to_dict()
fixed_parameter_dict[this_name] = collections.OrderedDict()
for key in ["value", "unit", "min_value", "max_value"]:
fixed_parameter_dict[this_name][key] = d[key]
fixed_parameters_summary = pd.DataFrame.from_dict(fixed_parameter_dict).T
# Re-order it
fixed_parameters_summary = fixed_parameters_summary[
["value", "min_value", "max_value", "unit"]
]
else:
fixed_parameters_summary = pd.DataFrame()
# Summary of linked parameters
linked_frames = []
if linked_parameters:
for parameter_name, parameter in list(linked_parameters.items()):
parameter_dict = collections.OrderedDict()
# Generate table with only a minimal set of info
variable, law = parameter.auxiliary_variable
this_dict = collections.OrderedDict()
this_dict["linked to"] = variable.path
this_dict["function"] = law.name
this_dict["current value"] = parameter.value
this_dict["unit"] = parameter.unit
parameter_dict[parameter_name] = this_dict
this_parameter_frame = pd.DataFrame.from_dict(parameter_dict)
linked_frames.append(this_parameter_frame)
else:
# No linked parameters
pass
empty_frame = "(none)%s" % new_line
# Independent variables
independent_v_frames = []
if self._independent_variables:
for variable_name, variable_instance in list(
self._independent_variables.items()
):
v_dict = collections.OrderedDict()
# Generate table with only a minimal set of info
this_dict = collections.OrderedDict()
this_dict["current value"] = variable_instance.value
this_dict["unit"] = variable_instance.unit
v_dict[variable_name] = this_dict
this_v_frame = pd.DataFrame.from_dict(v_dict)
independent_v_frames.append(this_v_frame)
else:
# No independent variables
pass
if rich_output:
source_summary_representation = sources_summary._repr_html_()
if free_parameters_summary.empty:
free_parameters_representation = empty_frame
else:
free_parameters_representation = free_parameters_summary._repr_html_()
if len(linked_frames) == 0:
linked_summary_representation = empty_frame
else:
linked_summary_representation = ""
for linked_frame in linked_frames:
linked_summary_representation += linked_frame._repr_html_()
linked_summary_representation += new_line
if len(independent_v_frames) == 0:
independent_v_representation = empty_frame
else:
independent_v_representation = ""
for v_frame in independent_v_frames:
independent_v_representation += v_frame._repr_html_()
independent_v_representation += new_line
if fixed_parameters_summary.empty:
fixed_parameters_representation = empty_frame
else:
fixed_parameters_representation = fixed_parameters_summary._repr_html_()
else:
source_summary_representation = sources_summary.__repr__()
if free_parameters_summary.empty:
free_parameters_representation = empty_frame
else:
free_parameters_representation = free_parameters_summary.__repr__()
if len(linked_frames) == 0:
linked_summary_representation = empty_frame
else:
linked_summary_representation = ""
for linked_frame in linked_frames:
linked_summary_representation += linked_frame.__repr__()
linked_summary_representation += "%s%s" % (new_line, new_line)
if len(independent_v_frames) == 0:
independent_v_representation = empty_frame
else:
independent_v_representation = ""
for v_frame in independent_v_frames:
independent_v_representation += v_frame.__repr__()
independent_v_representation += "%s%s" % (new_line, new_line)
if fixed_parameters_summary.empty:
fixed_parameters_representation = empty_frame
else:
fixed_parameters_representation = fixed_parameters_summary.__repr__()
# Build the representation
representation = "Model summary:%s" % (new_line)
if not rich_output:
representation += "==============%s%s" % (new_line, new_line)
else:
representation += new_line
# Summary on sources
representation += source_summary_representation
representation += new_line
# Free parameters
representation += "%sFree parameters (%i):%s" % (
new_line,
len(free_parameters),
new_line,
)
if not rich_output:
representation += "--------------------%s%s" % (new_line, new_line)
else:
representation += new_line
representation += free_parameters_representation
representation += new_line
# Fixed parameters
n_fix = len(parameters) - len(free_parameters) - len(linked_parameters)
representation += "%sFixed parameters (%i):%s" % (new_line, n_fix, new_line)
if self._complete_display:
if not rich_output:
representation += "---------------------%s%s" % (new_line, new_line)
else:
representation += new_line
representation += fixed_parameters_representation
else:
representation += (
"(abridged. Use complete=True to see all fixed parameters)%s" % new_line
)
representation += new_line
# Linked parameters
representation += "%sLinked parameters (%i):%s" % (
new_line,
len(self.linked_parameters),
new_line,
)
if not rich_output:
representation += "----------------------%s%s" % (new_line, new_line)
else:
representation += new_line
representation += linked_summary_representation
# Independent variables
representation += "%sIndependent variables:%s" % (new_line, new_line)
if not rich_output:
representation += "----------------------%s%s" % (new_line, new_line)
else:
representation += new_line
representation += independent_v_representation
return representation
def to_dict_with_types(self):
# Get the serialization dictionary
data = self.to_dict()
# Add the types to the sources
for key in list(data.keys()):
try:
element = self._get_child(key)
except KeyError: # pragma: no cover
raise RuntimeError("Source %s is unknown" % key)
else:
# There are three possible cases. Either the element is a source, or it is an independent
# variable, or a parameter
if hasattr(element, "source_type"):
# Change the name of the key adding the source type
data["%s (%s)" % (key, element.source_type)] = data.pop(key)
elif isinstance(element, IndependentVariable):
data["%s (%s)" % (key, "IndependentVariable")] = data.pop(key)
elif isinstance(element, Parameter):
data["%s (%s)" % (key, "Parameter")] = data.pop(key)
else: # pragma: no cover
raise ModelInternalError("Found an unknown class at the top level")
return data
def save(self, output_file, overwrite=False):
"""Save the model to disk"""
if os.path.exists(output_file) and overwrite is False:
raise ModelFileExists(
"The file %s exists already. If you want to overwrite it, use the 'overwrite=True' "
"options as 'model.save(\"%s\", overwrite=True)'. "
% (output_file, output_file)
)
else:
data = self.to_dict_with_types()
# Write it to disk
try:
# Get the YAML representation of the data
representation = my_yaml.dump(data, default_flow_style=False)
with open(output_file, "w+") as f:
                    # Add a new line at the end of each entry (just for clarity)
f.write(representation.replace("\n", "\n\n"))
except IOError:
raise CannotWriteModel(
os.path.dirname(os.path.abspath(output_file)),
"Could not write model file %s. Check your permissions to write or the "
"report on the free space which follows: " % output_file,
)
def get_number_of_point_sources(self):
"""
Return the number of point sources
:return: number of point sources
"""
return len(self._point_sources)
def get_point_source_position(self, id):
"""
Get the point source position (R.A., Dec)
:param id: id of the source
:return: a tuple with R.A. and Dec.
"""
pts = list(self._point_sources.values())[id]
return pts.position.get_ra(), pts.position.get_dec()
def get_point_source_fluxes(self, id, energies, tag=None):
"""
Get the fluxes from the id-th point source
:param id: id of the source
:param energies: energies at which you need the flux
:param tag: a tuple (integration variable, a, b) specifying the integration to perform. If this
parameter is specified then the returned value will be the average flux for the source computed as the integral
between a and b over the integration variable divided by (b-a). The integration variable must be an independent
variable contained in the model. If b is None, then instead of integrating the integration variable will be
set to a and the model evaluated in a.
:return: fluxes
"""
return list(self._point_sources.values())[id](energies, tag=tag)
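    # Usage sketch for the `tag` argument (names are illustrative; `model.time` is assumed
    # to be an IndependentVariable registered in this model):
    #
    #     import numpy as np
    #     energies = np.logspace(0, 3, 50)
    #     # plain evaluation at the given energies
    #     fluxes = model.get_point_source_fluxes(0, energies)
    #     # average flux, integrating the model over `time` between 0 and 10
    #     avg_fluxes = model.get_point_source_fluxes(0, energies, tag=(model.time, 0.0, 10.0))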
def get_point_source_name(self, id):
return list(self._point_sources.values())[id].name
def get_number_of_extended_sources(self):
"""
Return the number of extended sources
:return: number of extended sources
"""
return len(self._extended_sources)
def get_extended_source_fluxes(self, id, j2000_ra, j2000_dec, energies):
"""
Get the flux of the id-th extended sources at the given position at the given energies
:param id: id of the source
:param j2000_ra: R.A. where the flux is desired
:param j2000_dec: Dec. where the flux is desired
:param energies: energies at which the flux is desired
:return: flux array
"""
return list(self._extended_sources.values())[id](j2000_ra, j2000_dec, energies)
def get_extended_source_name(self, id):
"""
Return the name of the n-th extended source
:param id: id of the source (integer)
:return: the name of the id-th source
"""
return list(self._extended_sources.values())[id].name
def get_extended_source_boundaries(self, id):
(ra_min, ra_max), (dec_min, dec_max) = list(self._extended_sources.values())[
id
].get_boundaries()
return ra_min, ra_max, dec_min, dec_max
def is_inside_any_extended_source(self, j2000_ra, j2000_dec):
for ext_source in list(self.extended_sources.values()):
(ra_min, ra_max), (dec_min, dec_max) = ext_source.get_boundaries()
# Transform from the 0...360 convention to the -180..180 convention, so that
# the comparison is easier
if ra_min > 180:
ra_min = -(360 - ra_min)
if ra_min <= j2000_ra <= ra_max and dec_min <= j2000_dec <= dec_max:
return True
# If we are here, it means that no extended source contains the provided coordinates
return False
def get_number_of_particle_sources(self):
"""
Return the number of particle sources
:return: number of particle sources
"""
return len(self._particle_sources)
def get_particle_source_fluxes(self, id, energies):
"""
Get the fluxes from the id-th point source
:param id: id of the source
:param energies: energies at which you need the flux
:return: fluxes
"""
return list(self._particle_sources.values())[id](energies)
def get_particle_source_name(self, id):
return list(self._particle_sources.values())[id].name
def get_total_flux(self, energies):
"""
Returns the total differential flux at the provided energies from all *point* sources
:return:
"""
fluxes = []
for src in self._point_sources:
fluxes.append(self._point_sources[src](energies))
return np.sum(fluxes, axis=0)
|
StarcoderdataPython
|
1705632
|
<gh_stars>0
from django import forms
from django.http import Http404
from django.shortcuts import redirect
from django.utils.translation import gettext_lazy as _
from django.views import View
from django.views.generic import TemplateView
from django.views.generic.base import TemplateResponseMixin
from django.views.generic.detail import SingleObjectMixin
from django_select2.forms import ModelSelect2Widget
from ephios.core.models import AbstractParticipation, Shift, UserProfile
from ephios.extra.mixins import CustomPermissionRequiredMixin
class BaseDispositionParticipationForm(forms.ModelForm):
disposition_participation_template = "core/disposition/fragment_participation.html"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.can_delete = self.instance.state == AbstractParticipation.States.GETTING_DISPATCHED
try:
self.shift = self.instance.shift
except AttributeError as e:
raise ValueError(f"{type(self)} must be initialized with an instance.") from e
class Meta:
model = AbstractParticipation
fields = ["state"]
widgets = dict(state=forms.HiddenInput(attrs={"class": "state-input"}))
class DispositionBaseModelFormset(forms.BaseModelFormSet):
"""
To allow us to dynamically add server-side rendered forms to a formset
we patch a way to change the starting index.
"""
def __init__(self, *args, start_index=0, **kwargs):
self._start_index = start_index
super().__init__(*args, **kwargs)
def add_prefix(self, index):
return "%s-%s" % (self.prefix, self._start_index + index)
def delete_existing(self, obj, commit=True):
# refresh from db as obj has the state from the post data
db_obj = AbstractParticipation.objects.get(id=obj.id)
if db_obj.state != AbstractParticipation.States.GETTING_DISPATCHED:
raise ValueError(
"Deletion a participation is only allowed if it was just added through disposition."
)
super().delete_existing(obj, commit)
def get_disposition_formset(form):
return forms.modelformset_factory(
model=AbstractParticipation,
formset=DispositionBaseModelFormset,
form=form,
extra=0,
can_order=False,
can_delete=True,
)
def addable_users(shift):
"""
Return queryset of user objects that can be added to the shift.
This also includes users that already have a participation, as that might have gotten removed in JS.
This also includes users that can normally not see the event. The permission will be added accordingly.
If needed, this method could be moved to signup methods.
"""
return UserProfile.objects.all()
class AddUserForm(forms.Form):
user = forms.ModelChoiceField(
widget=ModelSelect2Widget(
model=UserProfile,
search_fields=["first_name__icontains", "last_name__icontains"],
attrs={"form": "add-user-form", "data-placeholder": _("search")},
),
queryset=UserProfile.objects.none(), # set using __init__
)
new_index = forms.IntegerField(widget=forms.HiddenInput)
def __init__(self, user_queryset, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["user"].queryset = user_queryset
class DispositionBaseViewMixin(CustomPermissionRequiredMixin, SingleObjectMixin):
permission_required = "core.change_event"
model = Shift
def setup(self, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
self.object: Shift = self.get_object()
def dispatch(self, request, *args, **kwargs):
if self.object.signup_method.disposition_participation_form_class is None:
raise Http404(_("This signup method does not support disposition."))
return super().dispatch(request, *args, **kwargs)
def get_permission_object(self):
return self.object.event
class AddUserView(DispositionBaseViewMixin, TemplateResponseMixin, View):
def get_template_names(self):
return [
self.object.signup_method.disposition_participation_form_class.disposition_participation_template
]
def post(self, request, *args, **kwargs):
shift = self.object
form = AddUserForm(
data=request.POST,
user_queryset=addable_users(shift),
)
if form.is_valid():
user: UserProfile = form.cleaned_data["user"]
instance = shift.signup_method.get_participation_for(user.as_participant())
instance.state = AbstractParticipation.States.GETTING_DISPATCHED
instance.save()
DispositionParticipationFormset = get_disposition_formset(
self.object.signup_method.disposition_participation_form_class
)
formset = DispositionParticipationFormset(
queryset=AbstractParticipation.objects.filter(pk=instance.pk),
prefix="participations",
start_index=form.cleaned_data["new_index"],
)
form = next(filter(lambda form: form.instance.id == instance.id, formset))
return self.render_to_response({"form": form, "shift": shift})
raise Http404()
class DispositionView(DispositionBaseViewMixin, TemplateView):
template_name = "core/disposition/disposition.html"
def get_formset(self):
DispositionParticipationFormset = get_disposition_formset(
self.object.signup_method.disposition_participation_form_class
)
formset = DispositionParticipationFormset(
self.request.POST or None,
queryset=self.object.participations.all(),
prefix="participations",
)
return formset
def post(self, request, *args, **kwargs):
formset = self.get_formset()
if formset.is_valid():
formset.save()
self.object.participations.filter(
state=AbstractParticipation.States.GETTING_DISPATCHED
).delete()
return redirect(self.object.event.get_absolute_url())
return self.get(request, *args, **kwargs, formset=formset)
def get_context_data(self, **kwargs):
kwargs.setdefault("formset", self.get_formset())
kwargs.setdefault("states", AbstractParticipation.States)
kwargs.setdefault(
"participant_template",
self.object.signup_method.disposition_participation_form_class.disposition_participation_template,
)
kwargs.setdefault(
"add_user_form",
AddUserForm(user_queryset=addable_users(self.object)),
)
return super().get_context_data(**kwargs)
|
StarcoderdataPython
|
9605878
|
from __future__ import absolute_import
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Sandesh Connection
#
import os
from builtins import object
from .gen_py.sandesh.constants import SANDESH_CONTROL_HINT
from .gen_py.sandesh.ttypes import SandeshRxDropReason
from .protocol import TXMLProtocol
from .sandesh_session import SandeshReader
from .sandesh_state_machine import Event, SandeshStateMachine
from .transport import TTransport
class SandeshConnection(object):
def __init__(self, sandesh_instance, client, collectors, stats_collector):
self._sandesh_instance = sandesh_instance
self._logger = sandesh_instance.logger()
self._client = client
# Collector name. Updated upon receiving the control message
# from the Collector during connection negotiation.
self._admin_down = False
self._state_machine = SandeshStateMachine(self, self._logger,
collectors, stats_collector)
self._state_machine.initialize()
# end __init__
# Public methods
def session(self):
return self._state_machine.session()
# end session
def statemachine(self):
return self._state_machine
# end statemachine
def sandesh_instance(self):
return self._sandesh_instance
# end sandesh_instance
def collectors(self):
return self._state_machine.collectors()
# end collectors
def collector(self):
return self._state_machine.collector()
# end collector
def collector_name(self):
return self._state_machine.collector_name()
# end collector_name
def state(self):
return self._state_machine.state()
# end state
def handle_initialized(self, count):
uve_types = []
uve_global_map = self._sandesh_instance._uve_type_maps.\
get_uve_global_map()
for uve_type_key in uve_global_map.keys():
uve_types.append(uve_type_key)
from .gen_py.sandesh_ctrl.ttypes import SandeshCtrlClientToServer
ctrl_msg = SandeshCtrlClientToServer(
self._sandesh_instance.source_id(),
self._sandesh_instance.module(),
count,
uve_types,
os.getpid(),
0,
self._sandesh_instance.node_type(),
self._sandesh_instance.instance_id())
self._logger.debug(
'Send sandesh control message. uve type count # %d' %
(len(uve_types)))
ctrl_msg.request('ctrl', sandesh=self._sandesh_instance)
# end handle_initialized
def handle_sandesh_ctrl_msg(self, ctrl_msg):
self._client.handle_sandesh_ctrl_msg(ctrl_msg)
# end handle_sandesh_ctrl_msg
def handle_sandesh_uve_msg(self, uve_msg):
self._client.send_sandesh(uve_msg)
# end handle_sandesh_uve_msg
def set_admin_state(self, down):
if self._admin_down != down:
self._admin_down = down
self._state_machine.set_admin_state(down)
# end set_admin_state
def set_collectors(self, collectors):
self._state_machine.enqueue_event(Event(
event=Event._EV_COLLECTOR_CHANGE, collectors=collectors))
# end set_collectors
# Private methods
def _receive_sandesh_msg(self, session, msg):
(hdr, hdr_len, sandesh_name) = \
SandeshReader.extract_sandesh_header(msg)
if sandesh_name is None:
self._sandesh_instance.msg_stats().update_rx_stats(
'__UNKNOWN__', len(msg), SandeshRxDropReason.DecodingFailed)
self._logger.error(
'Failed to decode sandesh header for "%s"' %
(msg))
return
if hdr.Hints & SANDESH_CONTROL_HINT:
self._logger.debug(
'Received sandesh control message [%s]' %
(sandesh_name))
if sandesh_name != 'SandeshCtrlServerToClient':
self._sandesh_instance.msg_stats().update_rx_stats(
sandesh_name, len(msg),
SandeshRxDropReason.ControlMsgFailed)
self._logger.error(
'Invalid sandesh control message [%s]' %
(sandesh_name))
return
transport = TTransport.TMemoryBuffer(msg[hdr_len:])
protocol_factory = TXMLProtocol.TXMLProtocolFactory()
protocol = protocol_factory.getProtocol(transport)
from .gen_py.sandesh_ctrl.ttypes import SandeshCtrlServerToClient
sandesh_ctrl_msg = SandeshCtrlServerToClient()
if sandesh_ctrl_msg.read(protocol) == -1:
self._sandesh_instance.msg_stats().update_rx_stats(
sandesh_name, len(msg),
SandeshRxDropReason.DecodingFailed)
self._logger.error(
'Failed to decode sandesh control message "%s"' %
(msg))
else:
self._sandesh_instance.msg_stats().update_rx_stats(
sandesh_name, len(msg))
self._state_machine.on_sandesh_ctrl_msg_receive(
session, sandesh_ctrl_msg, hdr.Source)
else:
self._logger.debug(
'Received sandesh message [%s]' %
(sandesh_name))
self._client.handle_sandesh_msg(sandesh_name,
msg[hdr_len:], len(msg))
# end _receive_sandesh_msg
# end class SandeshConnection
|
StarcoderdataPython
|
6510702
|
<filename>software/ringledoff.py
import board
import neopixel
pixels = neopixel.NeoPixel(board.D18, 24)
pixels.fill((0,0,0))
|
StarcoderdataPython
|
4925034
|
<reponame>yarikoptic/metadata-model<filename>tools/metadata_creator/execute.py
import shlex
import subprocess
from typing import Any, List, Optional, Tuple, Union
def execute(arguments: Union[str, List[str]],
stdin_content: Optional[Union[str, bytes]] = None) -> Any:
return subprocess.run(
shlex.split(arguments) if isinstance(arguments, str) else arguments,
input=stdin_content.encode() if isinstance(stdin_content, str) else stdin_content,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def checked_execute(arguments: Union[str, List[str]],
stdin_content: Optional[Union[str, bytes]] = None) -> Tuple[str, str]:
result = execute(arguments, stdin_content)
if result.returncode != 0:
raise RuntimeError(
f"Command failed (exit code: {result.returncode}) {' '.join(arguments)}:\n"
f"STDOUT:\n"
f"{result.stdout.decode()}"
f"STDERR:\n"
f"{result.stderr.decode()}")
return result.stdout.decode(), result.stderr.decode()
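# Usage sketch (the command is illustrative, not part of this module):
#
#     stdout, stderr = checked_execute(["git", "status"])
#
# execute() returns the raw CompletedProcess and leaves error handling to the caller,
# while checked_execute() raises RuntimeError with the captured stdout/stderr whenever
# the exit code is non-zero.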
|
StarcoderdataPython
|
1621157
|
import operator
import json
from functools import reduce
import unicodecsv as csv
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.utils.datastructures import SortedDict
from rest_framework.decorators import api_view
from rest_framework.response import Response
from curricula.models import Curriculum, Unit
from standards.models import *
from standards.serializers import *
def index(request):
frameworks = Framework.objects.all()
curricula = Curriculum.objects.all()
return render(request, 'standards/index.html', {'frameworks': frameworks, 'curricula': curricula})
def by_framework(request, slug, curriculum_slug=None):
framework = get_object_or_404(Framework, slug=slug)
top_categories = Category.objects.filter(framework=framework, parent__isnull=True) \
.prefetch_related('children__standards__lesson_set', 'standards__lesson_set')
return render(request, 'standards/framework.html', {'framework': framework, 'top_categories': top_categories})
def by_curriculum(request, slug):
curriculum = get_object_or_404(Curriculum, slug=slug)
'''
standards_cols, standards_rows = curriculum.get_standards()
cols = json.dumps(standards_cols)
cols = cols.replace("\"heatCell\"", "heatCell")
rows = json.dumps(standards_rows)
return render(request, 'standards/curriculum.html', {'curriculum': curriculum,
'standards_cols': cols,
'standards_rows': rows})
'''
return render(request, 'standards/curriculum_nogrid.html', {'curriculum': curriculum})
def by_curriculum_csv(request, slug):
curriculum = get_object_or_404(Curriculum, slug=slug)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s_standards.csv"' % curriculum.slug
writer = csv.writer(response, encoding='utf-8')
writer.writerow([
'curriculum',
'unit',
'lesson #',
'lesson name',
'standard framework',
'standard',
'category',
'description',
'cross curricular opportunity'
])
for unit in curriculum.units:
for lesson in unit.lessons:
for standard in lesson.standards.all():
writer.writerow([
curriculum.slug,
unit.slug,
'lesson %d' % lesson.number,
lesson.title,
standard.framework.slug,
standard.shortcode,
standard.category.name,
standard.name,
False
])
for standard in lesson.opportunity_standards.all():
writer.writerow([
curriculum.slug,
unit.slug,
'lesson %d' % lesson.number,
lesson.title,
standard.framework.slug,
standard.shortcode,
standard.category.name,
standard.name,
True
])
return response
def by_unit(request, slug, unit_slug):
curriculum = get_object_or_404(Curriculum, slug=slug)
unit = get_object_or_404(Unit, curriculum=curriculum, slug=unit_slug)
'''
standards_cols, standards_rows = unit.get_standards()
cols = json.dumps(standards_cols)
cols = cols.replace("\"heatCell\"", "heatCell")
rows = json.dumps(standards_rows)
return render(request, 'standards/curriculum.html', {'curriculum': curriculum,
'unit': unit,
'standards_cols': cols,
'standards_rows': rows})
'''
return render(request, 'standards/curriculum_nogrid.html', {'curriculum': curriculum, 'unit': unit})
def by_unit_csv(request, slug, unit_slug):
curriculum = get_object_or_404(Curriculum, slug=slug)
unit = get_object_or_404(Unit, curriculum=curriculum, slug=unit_slug)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s_%s_standards.csv"' % (curriculum.slug, unit.slug)
writer = csv.writer(response, encoding='utf-8')
writer.writerow([
'curriculum',
'unit',
'lesson #',
'lesson name',
'standard framework',
'standard',
'category',
'description',
'cross curricular opportunity'
])
for lesson in unit.lessons:
for standard in lesson.standards.all():
writer.writerow([
curriculum.slug,
unit.slug,
'lesson %d' % lesson.number,
lesson.title,
standard.framework.slug,
standard.shortcode,
standard.category.name,
standard.name,
'false'
])
for standard in lesson.opportunity_standards.all():
writer.writerow([
curriculum.slug,
unit.slug,
'lesson %d' % lesson.number,
lesson.title,
standard.framework.slug,
standard.shortcode,
standard.category.name,
standard.name,
'true'
])
return response
def standards_by_framework_csv(request, slug):
framework = get_object_or_404(Framework, slug=slug)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s_standards.csv"' % (framework.slug.lower())
writer = csv.writer(response, encoding='utf-8')
writer.writerow([
'framework',
'category',
'standard',
'description'
])
for standard in framework.standards.all():
writer.writerow([
framework.slug.lower(),
standard.category.shortcode,
standard.shortcode,
standard.name
])
return response
def categories_by_framework_csv(request, slug):
framework = get_object_or_404(Framework, slug=slug)
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s_categories.csv"' % (framework.slug.lower())
writer = csv.writer(response, encoding='utf-8')
writer.writerow([
'framework',
'parent',
'category',
'type',
'description'
])
# Make categories without parents appear before categories with parents.
# Because there are only two layers of categories, this ensures that
# every parent category appears before any of its children. The secondary
# sort by shortcode is just to make the output file easier to read.
categories = framework.categories.all()
categories = sorted(categories, key=lambda c: [c.parent_shortcode(), c.shortcode])
for category in categories:
writer.writerow([
framework.slug.lower(),
category.parent_shortcode(),
category.shortcode,
category.type,
category.description.strip() if category.description else category.name
])
return response
def single_standard(request, slug, shortcode):
# standard = get_object_or_404(Standard.objects.prefetch_related('lesson_set__unitlesson_set__unit__curriculum'), framework__slug=slug, shortcode=shortcode)
standard = get_object_or_404(Standard, framework__slug=slug, shortcode=shortcode)
return render(request, 'standards/standard.html', {'standard': standard})
@api_view(['GET', ])
def standard_element(request, slug, shortcode, format=None):
# standard = get_object_or_404(Standard.objects.prefetch_related('lesson_set__unitlesson_set__unit__curriculum'), framework__slug=slug, shortcode=shortcode)
standard = get_object_or_404(Standard, framework__slug=slug, shortcode=shortcode)
serializer = StandardSerializer(standard)
return Response(serializer.data)
@api_view(['GET', ])
def standard_list(request, curriculum_slug, framework_slug=None):
curriculum = get_object_or_404(Curriculum, slug=curriculum_slug)
if framework_slug:
standards = Standard.objects.filter(framework__slug=framework_slug)
else:
standards = Standard.objects.all()
serializer = StandardSerializer(standards, context={'curriculum': curriculum}, many=True)
return Response(serializer.data)
@api_view(['GET', ])
def nested_standard_list(request, curriculum_slug, framework_slug=None):
curriculum = get_object_or_404(Curriculum, slug=curriculum_slug)
query = []
serialized = {}
if framework_slug:
query.append(("framework__slug", framework_slug))
query_list = [Q(x) for x in query]
standards = Standard.objects.filter(reduce(operator.and_, query_list)).order_by('shortcode')
for standard in standards:
        print(standard.shortcode)
serializer = NestedStandardSerializer(standard, context={'curriculum': curriculum})
serialized[standard.shortcode] = serializer.data
# return Response(serializer.data)
return Response(SortedDict(serialized))
@api_view(['GET', ])
def nested_category_list(request, curriculum_slug, framework_slug=None):
curriculum = get_object_or_404(Curriculum, slug=curriculum_slug)
query = []
serialized = {}
if request.GET.get('category'): # Filter by standard / cat type
query.append(("type", request.GET.get('category')))
else:
query.append(('parent__isnull', True))
if framework_slug:
query.append(("framework__slug", framework_slug))
# categories = Category.objects.filter(parent__isnull=True, framework__slug=framework_slug).order_by('shortcode')
# else:
# categories = Category.objects.filter(parent__isnull=True).order_by('shortcode')
query_list = [Q(x) for x in query]
categories = Category.objects.filter(reduce(operator.and_, query_list)).order_by('shortcode')
for category in categories:
        print(category.shortcode)
serializer = NestedCategorySerializer(category, context={'curriculum': curriculum})
serialized[category.shortcode] = serializer.data
# return Response(serializer.data)
return Response(SortedDict(serialized))
|
StarcoderdataPython
|
8166159
|
# Generated by Django 2.0.2 on 2018-08-21 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0002_auto_20180821_1337'),
]
operations = [
migrations.AlterField(
model_name='student',
name='bitsId',
field=models.CharField(max_length=15, null=True),
),
migrations.AlterField(
model_name='student',
name='name',
field=models.CharField(max_length=50, null=True),
),
migrations.AlterField(
model_name='student',
name='username',
field=models.CharField(max_length=10, null=True),
),
]
|
StarcoderdataPython
|
8154931
|
import gffutils
import argparse
import os
def split3UTR(UTR3gff, fragsize, outfile):
gff_fn = UTR3gff
    print('Indexing gff...')
db_fn = os.path.abspath(gff_fn) + '.db'
if os.path.isfile(db_fn) == False:
gffutils.create_db(gff_fn, db_fn, merge_strategy = 'merge', verbose = True)
db = gffutils.FeatureDB(db_fn)
    print('Done indexing!')
UTR3s = db.features_of_type('UTR3')
outfh = open(outfile, 'w')
for UTR3 in UTR3s:
#Only going to consider single exon UTRs
if len(list(db.children(UTR3, featuretype = 'exon', level = 1))) > 1:
continue
ID = UTR3.attributes['ID'][0]
parent = UTR3.attributes['Parent'][0]
gene_id = UTR3.attributes['gene_id'][0]
coord = UTR3.start
counter = 1
while coord <= UTR3.end:
windowstart = coord
windowend = coord + fragsize
idfield = 'ID=' + ID + '.utr3fragment{0}'.format(counter) + ';Parent=' + parent + ';gene_id=' + gene_id
with open(outfile, 'a') as outfh:
outfh.write(('\t').join([str(UTR3.chrom), 'longest3UTRfrags', 'UTR3frag', str(windowstart), str(windowend), '.', str(UTR3.strand), '.', idfield]) + '\n')
coord = coord + fragsize + 1
counter +=1
os.remove(db_fn)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gff', type = str, help = 'Input gff of 3\' UTRs to split.')
parser.add_argument('--fragsize', type = int, help = 'Fragment size for UTR fragments.')
parser.add_argument('--output', type = str, help = 'Output gff file.')
args = parser.parse_args()
split3UTR(args.gff, args.fragsize, args.output)
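# Invocation sketch (the script and file names are illustrative):
#
#     python split3UTR.py --gff longest_3utrs.gff --fragsize 100 --output utr3_fragments.gff
#
# Each single-exon UTR3 feature is split into consecutive UTR3frag windows of `fragsize`
# nucleotides, with IDs of the form <UTR3 ID>.utr3fragment1, .utr3fragment2, ...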
|
StarcoderdataPython
|
1975721
|
"""
This file contains the fixtures that are reusable by any tests within
this directory. You don't need to import the fixtures as pytest will
discover them automatically. More info here:
https://docs.pytest.org/en/latest/fixture.html
"""
from typing import Any, Dict
import pytest
from kedro.io import AbstractDataSet, DataCatalog
class FakeDataSet(AbstractDataSet):
def __init__(self, data):
self.log = []
self.data = data
def _load(self) -> Any:
self.log.append(("load", self.data))
return self.data
def _save(self, data: Any) -> None:
self.log.append(("save", data))
self.data = data
def _describe(self) -> Dict[str, Any]:
return {"data": self.data}
@pytest.fixture
def fake_data_set():
return FakeDataSet(123)
@pytest.fixture
def catalog(fake_data_set):
return DataCatalog({"test": fake_data_set})
|
StarcoderdataPython
|
3205169
|
# ACTIVITY 3: PYTHON NUMBERS
# Program Description: This is a program that takes the user's input to calculate the employee's gross and net salary.
# The hourly rate is already pre-determined and has been set to 500.
# Additionally, the tax rate is set to 10 percent of the employee's gross income.
hourlyRate = 500
taxRate = 0.10
# A loop will be used to make sure that the user can use the program continuously.
while True:
# The program will mainly use float when working with the numbers.
# This will help in the accuracy of the calculations.
print ( "==================== ENTER DETAILS ====================" )
employeeName = input ( " Employee Name: " )
numberHours = float( input ( " Enter number of hours: " ) )
grossSalary = float(numberHours) * float(hourlyRate)
sssContribution = float( input ( " SSS contribution: " ) )
philHealth = float ( input ( " Phil Health: " ) )
housingLoan = float ( input ( " Housing loan: " ) )
print ( "=======================================================\n" )
# Generating payslip.
print ( "====================== PAYSLIP ======================" )
print ( "================ EMPLOYEE INFORMATION =================" )
print ( " Employee Name: " , employeeName )
print ( " Rendered Hours: ", numberHours )
print ( " Rate per Hour: ", hourlyRate )
print ( " Gross Salary: ", grossSalary )
print ( "====================== DEDUCTIONS =====================" )
print ( " SSS: " , sssContribution )
print ( " PhilHealth: " , philHealth )
print ( " Other Loan: " , housingLoan )
tax = float ( grossSalary ) * taxRate
print ( " Tax : " , tax )
deductions_total = sssContribution + philHealth + housingLoan + tax
print ( " Total Deductions: " , deductions_total )
netSalary = grossSalary - deductions_total
print ( "\n Net Salary: PHP " , netSalary )
print ( "=======================================================\n" )
|
StarcoderdataPython
|
9775632
|
# -*- coding: utf-8 -*-
import os.path
from gettext import NullTranslations, translation
translation_dir = os.path.join(
os.path.dirname(
os.path.abspath(
__file__,
),
),
"translations"
)
current_translation = NullTranslations()
def set_locale(locales):
global current_translation
current_translation = translation(
domain="rqalpha",
localedir=translation_dir,
languages=locales,
)
def gettext(message):
return current_translation.gettext(message)
|
StarcoderdataPython
|
285989
|
# Generated by Django 3.0.2 on 2020-02-11 17:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0006_auto_20200211_1625'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ForeignKey(default=34, on_delete=django.db.models.deletion.CASCADE, to='posts.UploadedImage'),
preserve_default=False,
),
]
|
StarcoderdataPython
|
11280601
|
# -*- coding: utf-8 -*-
import time
from pyqtgraph.Qt import QtCore, QtGui, QtTest
def resizeWindow(win, w, h, timeout=2.0):
"""Resize a window and wait until it has the correct size.
This is required for unit testing on some platforms that do not guarantee
immediate response from the windowing system.
"""
QtGui.QApplication.processEvents()
# Sometimes the window size will switch multiple times before settling
# on its final size. Adding qWaitForWindowExposed seems to help with this.
QtTest.QTest.qWaitForWindowExposed(win)
win.resize(w, h)
start = time.time()
while True:
w1, h1 = win.width(), win.height()
if (w,h) == (w1,h1):
return
QtTest.QTest.qWait(10)
if time.time()-start > timeout:
raise TimeoutError("Window resize failed (requested %dx%d, got %dx%d)" % (w, h, w1, h1))
# Functions for generating user input events.
# We would like to use QTest for this purpose, but it seems to be broken.
# See: http://stackoverflow.com/questions/16299779/qt-qgraphicsview-unit-testing-how-to-keep-the-mouse-in-a-pressed-state
def mousePress(widget, pos, button, modifier=None):
if isinstance(widget, QtGui.QGraphicsView):
widget = widget.viewport()
if modifier is None:
modifier = QtCore.Qt.KeyboardModifier.NoModifier
event = QtGui.QMouseEvent(QtCore.QEvent.Type.MouseButtonPress, pos, button, QtCore.Qt.MouseButton.NoButton, modifier)
QtGui.QApplication.sendEvent(widget, event)
def mouseRelease(widget, pos, button, modifier=None):
if isinstance(widget, QtGui.QGraphicsView):
widget = widget.viewport()
if modifier is None:
modifier = QtCore.Qt.KeyboardModifier.NoModifier
event = QtGui.QMouseEvent(QtCore.QEvent.Type.MouseButtonRelease, pos, button, QtCore.Qt.MouseButton.NoButton, modifier)
QtGui.QApplication.sendEvent(widget, event)
def mouseMove(widget, pos, buttons=None, modifier=None):
if isinstance(widget, QtGui.QGraphicsView):
widget = widget.viewport()
if modifier is None:
modifier = QtCore.Qt.KeyboardModifier.NoModifier
if buttons is None:
buttons = QtCore.Qt.MouseButton.NoButton
event = QtGui.QMouseEvent(QtCore.QEvent.Type.MouseMove, pos, QtCore.Qt.MouseButton.NoButton, buttons, modifier)
QtGui.QApplication.sendEvent(widget, event)
def mouseDrag(widget, pos1, pos2, button, modifier=None):
mouseMove(widget, pos1)
mousePress(widget, pos1, button, modifier)
mouseMove(widget, pos2, button, modifier)
mouseRelease(widget, pos2, button, modifier)
def mouseClick(widget, pos, button, modifier=None):
mouseMove(widget, pos)
mousePress(widget, pos, button, modifier)
mouseRelease(widget, pos, button, modifier)
|
StarcoderdataPython
|
5005754
|
<reponame>Hwizdaleck/Python
usuario1=input('enter your username')
usuario2='<PASSWORD>'
qtd=len(usuario1)
if qtd != 6 or usuario1 != usuario2:
    print('incorrect password')
else:
    print('correct password')
|
StarcoderdataPython
|
5080301
|
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : deep-learning-with-python-notebooks
@File : ch0601_raw_text_to_word_embedding.py
@Version : v0.1
@Time : 2019-11-23 14:26
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "Deep Learning with Python", Francois Chollet, Sec060103, P155
@Desc : Deep learning for text and sequences; processing text data, from raw text to word embeddings
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np  # pip install numpy<1.17; versions below 1.17 do not raise the error
import winsound
from keras.activations import relu, sigmoid
from keras.layers import Dense, Flatten
from keras.layers import Embedding
from keras.losses import binary_crossentropy
from keras.models import Sequential
from keras.optimizers import rmsprop
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from tools import plot_classes_results
# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Set the numeric display precision to 3 decimal places
np.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)
# to make this notebook's output stable across runs
seed = 42
np.random.seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# Listing 6.8 Prepare the labels of the raw IMDB data
imdb_dir = "C:/Users/Administrator/PycharmProjects/Data/aclImdb/"
train_dir = os.path.join(imdb_dir, 'train')
test_dir = os.path.join(imdb_dir, 'test')
# Listing 6.9 Tokenize the text of the raw IMDB data
max_len = 100  # maximum length of each review
max_words = 10000  # vocabulary size
epochs = 10
batch_size = 32
training_samples = 200
validation_samples = 10000
def load_data_set(data_dir):
labels, texts = [], []
for label_type in ['neg', 'pos']:
dir_name = os.path.join(data_dir, label_type)
for fname in os.listdir(dir_name):
# print(fname)
if fname[-4:] == '.txt':
f = open(os.path.join(dir_name, fname), encoding = 'utf-8')
texts.append(f.read())
f.close()
if label_type == 'neg':
labels.append(0)
else:
labels.append(1)
pass
pass
pass
pass
return texts, labels
train_texts, train_labels = load_data_set(train_dir)
test_texts, test_labels = load_data_set(test_dir)
tokenizer = Tokenizer(num_words = max_words)
tokenizer.fit_on_texts(train_texts)
word_index = tokenizer.word_index
print("Found {} unique tokens.".format(len(word_index)))
train_sequences = tokenizer.texts_to_sequences(train_texts)
train_data = pad_sequences(train_sequences, maxlen = max_len)
train_labels = np.asarray(train_labels)
print("Shape of data tensor:", train_data.shape)
print("Shape of label tensor:", train_labels.shape)
indices = np.arange(train_data.shape[0])
np.random.shuffle(indices)
train_data = train_data[indices]
train_labels = train_labels[indices]
x_train = train_data[:training_samples]
y_train = train_labels[:training_samples]
x_val = train_data[training_samples:training_samples + validation_samples]
y_val = train_labels[training_samples:training_samples + validation_samples]
test_sequences = tokenizer.texts_to_sequences(test_texts)
x_test = pad_sequences(test_sequences, maxlen = max_len)
y_test = np.asarray(test_labels)
# downloading... https://nlp.stanford.edu/projects/glove/
# download: http://nlp.stanford.edu/data/glove.6B.zip
# Listing 6.10 Parse the GloVe word-embedding file
glove_dir = "C:/Users/Administrator/PycharmProjects/Data/GloVe/"
embeddings_index = {}
f = open(os.path.join(glove_dir, 'glove.6B.200d.txt'), encoding = 'utf-8')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype = 'float32')
embeddings_index[word] = coefs
pass
f.close()
print("Found {} word vectors.".format(len(embeddings_index)))
# Listing 6.11 Prepare the GloVe word-embedding matrix
embedding_dim = 200
embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
if i < max_words:
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
pass
pass
pass
def train_model(model):
    # Listing 6.14 Train and evaluate the model
    # importing this at the top of the file raises an error, so it is done here
from keras.metrics import binary_accuracy
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
return model.fit(x_train, y_train, epochs = epochs, batch_size = batch_size, validation_data = (x_val, y_val),
verbose = 2, use_multiprocessing = True)
# ----------------------------------------------------------------------
def definite_model_use_GloVe():
    # Listing 6.12 Model definition
    title = "with pre-trained word embeddings"
model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length = max_len))
model.add(Flatten())
model.add(Dense(32, activation = relu))
model.add(Dense(1, activation = sigmoid))
    # Listing 6.13 Load the pretrained GloVe word embeddings into the Embedding layer
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False
model.summary()
history = train_model(model)
plot_classes_results(history, title, epochs)
model.save_weights('pre_trained_glove_model.h5')
model.load_weights('pre_trained_glove_model.h5')
print(title + "评估测试集", model.evaluate(x_test, y_test))
# ----------------------------------------------------------------------
def definite_model_no_GloVe():
title = "不使用预训练词嵌入"
model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length = max_len))
model.add(Flatten())
model.add(Dense(32, activation = relu))
model.add(Dense(1, activation = sigmoid))
model.summary()
history = train_model(model)
plot_classes_results(history, title, epochs)
print(title + "评估测试集", model.evaluate(x_test, y_test))
# ----------------------------------------------------------------------
definite_model_use_GloVe()
definite_model_no_GloVe()
# Beep to signal that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
plt.show()
pass
|
StarcoderdataPython
|
6690371
|
<filename>klusta_process_manager/fileBrowser/fileBrowser.py
import os
#QT
import sip
sip.setapi('QVariant',2)
sip.setapi('QString',2)
from PyQt4 import QtCore,QtGui
from .tableDelegate import TableDelegate
from .folderView import FolderView
#------------------------------------------------------------------------------------------------------------
# Worker: Runs continuously in a separate thread
# Can run different methods; a running method can be interrupted by a new method call
#------------------------------------------------------------------------------------------------------------
class Worker(QtCore.QObject):
valueChanged=QtCore.pyqtSignal(int)
folderDone=QtCore.pyqtSignal(int)
finished=QtCore.pyqtSignal()
def __init__(self):
super(Worker,self).__init__()
self._abort=False
self._interrupt=False
self._method="none"
self.mutex=QtCore.QMutex()
self.condition=QtCore.QWaitCondition()
def mainLoop(self):
while 1:
self.mutex.lock()
if not self._interrupt and not self._abort:
self.condition.wait(self.mutex)
self._interrupt=False
if self._abort:
self.finished.emit()
return
method=self._method
self.mutex.unlock()
if method=="icon_folder":
self.doMethod_icon_folder()
def requestMethod(self,method,arg=None):
locker=QtCore.QMutexLocker(self.mutex)
self._interrupt=True
self._method=method
self._arg=arg
self.condition.wakeOne()
def doMethod_icon_folder(self):
expList=self._arg
i=0
s=len(expList)
for exp in expList:
self.mutex.lock()
abort=self._abort
interrupt=self._interrupt
self.mutex.unlock()
if abort or interrupt:
self.valueChanged.emit(100)
break
exp.reset_folder_icon()
self.folderDone.emit(i)
i+=1
self.valueChanged.emit(i*100.0/s)
def abort(self):
locker=QtCore.QMutexLocker(self.mutex)
self._abort=True
self.condition.wakeOne()
#------------------------------------------------------------------------------------------------------------
# Model
#------------------------------------------------------------------------------------------------------------
class Model(QtCore.QAbstractTableModel):
def __init__(self,delegate=None,parent=None):
super(Model,self).__init__(parent)
#thread
self.working=False
self.thread=QtCore.QThread()
self.worker=Worker()
self.worker.moveToThread(self.thread)
self.thread.started.connect(self.worker.mainLoop)
self.thread.finished.connect(self.deleteLater)
self.thread.start()
self.worker.folderDone.connect(self.icon_done)
self.worker.finished.connect(self.thread.quit)
#list of current experiments to display
self.experimentList=[]
#Delegate
self.delegate=delegate
def rowCount(self,QModelIndex):
return len(self.experimentList)
def columnCount(self,QModelIndex):
return 4
def icon_done(self,row):
idx=self.index(row,3)
self.dataChanged.emit(idx,idx)
def reset_list(self,expList):
self.beginResetModel()
expList.sort()
self.experimentList=expList[:]
self.reset_horizontal_lines()
self.worker.requestMethod("icon_folder",self.experimentList)
self.endResetModel()
#To draw horizontal line according to date
def reset_horizontal_lines(self):
listDate=[exp.dateTime for exp in self.experimentList]
self.delegate.reset_horizontal_lines(listDate)
def clear(self):
self.beginResetModel()
self.experimentList=[]
self.endResetModel()
def data(self,index,role):
col=index.column()
row=index.row()
if role==QtCore.Qt.DisplayRole:
if col==0:
return self.experimentList[row].yearMonth
if col==1:
return self.experimentList[row].day
if col==2:
return self.experimentList[row].time
if col==3:
return self.experimentList[row].folderName
if role==QtCore.Qt.DecorationRole:
if col==3:
path=os.path.join(os.path.dirname(os.path.realpath(__file__)), '../icons/')
path=os.path.realpath(path)+"/"
return QtGui.QIcon(path+self.experimentList[row].folder.icon)
def flags(self,index):
if index.column()==3:
return QtCore.Qt.ItemIsEnabled|QtCore.Qt.ItemIsSelectable
return QtCore.Qt.NoItemFlags
def pathLocal_from_index(self,index):
exp=self.experimentList[index.row()]
return exp.pathLocal
def createFiles_onSelection(self,selection,prmModel,prbModel):
for index in selection:
self.experimentList[index.row()].create_files(prmModel=prmModel,prbModel=prbModel)
self.experimentList[index.row()].reset_folder_icon()
self.dataChanged.emit(selection[0],selection[-1])
def update_exp(self,exp):
if exp in self.experimentList:
row=self.experimentList.index(exp)
index=self.index(row,3)
self.dataChanged.emit(index,index)
#--------------------------------------------------------------------------------------------------------------
# FileBrowser Widget
#--------------------------------------------------------------------------------------------------------------
class FileBrowser(QtGui.QWidget):
def __init__(self,ROOT,parent=None):
super(FileBrowser,self).__init__(parent)
#Combo Box
self.animalComboBox=QtGui.QComboBox()
#model/view
self.delegate=TableDelegate(self)
self.model=Model(self.delegate,self)
self.view=FolderView(self.model,self)
self.model.worker.valueChanged.connect(self.display_load)
self.view.table.setItemDelegate(self.delegate)
#button
pathIcon=os.path.join(os.path.dirname(os.path.realpath(__file__)), '../icons/downarrow.png')
pathIcon=os.path.realpath(pathIcon)
self.button_add=QtGui.QPushButton(QtGui.QIcon(pathIcon)," ")
self.button_createFiles=QtGui.QPushButton("Create prm/prb")
self.button_createFiles.clicked.connect(self.createFiles)
self.button_createFiles.setEnabled(False)
self.button_loadModels=QtGui.QPushButton("Load models")
self.button_loadModels.clicked.connect(self.loadModels)
#label
labelPath=ROOT+os.sep
if len(labelPath)>20:
labelPath="..."+labelPath[-17:]
self.label_path=QtGui.QLabel(labelPath)
self.label_load=QtGui.QLabel(' ')
self.label_prmModel=QtGui.QLabel('no prm model')
self.label_prbModel=QtGui.QLabel('no prb model')
self.prmModel=QtCore.QFileInfo()
self.prbModel=QtCore.QFileInfo()
#Layout
hbox1=QtGui.QHBoxLayout()
hbox1.addWidget(self.label_path)
hbox1.addWidget(self.animalComboBox)
hbox1.addStretch()
hbox2=QtGui.QHBoxLayout()
hbox2.addWidget(self.view.label_hide)
hbox2.addWidget(self.view.edit_hide)
grid=QtGui.QHBoxLayout()
grid.addWidget(self.button_add)
grid.addWidget(self.button_loadModels)
grid.addWidget(self.label_prmModel)
grid.addWidget(self.label_prbModel)
grid.addWidget(self.button_createFiles)
grid.addWidget(self.label_load)
layout=QtGui.QGridLayout()
layout.addLayout(hbox1,1,1)
layout.addLayout(hbox2,1,2)
layout.addWidget(self.view,2,1,4,2)
layout.addLayout(grid,6,1,1,2)
self.setLayout(layout)
def set_animalComboBox(self,animalList):
for animalID in animalList:
self.animalComboBox.addItem(animalID)
def get_experiment_selection(self):
return self.view.table.selectedIndexes()
def createFiles(self):
if self.prmModel.exists() and self.prbModel.exists():
selection=self.get_experiment_selection()
self.model.createFiles_onSelection(selection,prmModel=self.prmModel,prbModel=self.prbModel)
self.view.refresh()
def loadModels(self):
filebox=QtGui.QFileDialog(self,"Load model for PRB and PRM files")
filebox.setFileMode(QtGui.QFileDialog.ExistingFiles)
filebox.setNameFilters(["PRB/PRM (*.prm *.prb)"])
filebox.setOptions(QtGui.QFileDialog.DontUseNativeDialog)
if filebox.exec_():
for selectedFile in filebox.selectedFiles():
if selectedFile.endswith(".prm"):
self.prmModel.setFile(selectedFile)
self.label_prmModel.setText(self.prmModel.fileName())
elif selectedFile.endswith(".prb"):
self.prbModel.setFile(selectedFile)
self.label_prbModel.setText(self.prbModel.fileName())
if self.prmModel.exists() and self.prbModel.exists():
self.button_createFiles.setEnabled(True)
def display_load(self,i):
percentage=str(i)+'%'
if i==100:
self.label_load.setText("")
else:
self.label_load.setText("Loading icons: "+percentage)
def reset_experimentList(self,experimentInfoList):
self.model.reset_list(experimentInfoList)
self.view.reset_view()
def on_close(self):
self.model.worker.abort()
#self.model.thread.wait()
|
StarcoderdataPython
|
9766191
|
import unittest
from random import random
from bubble_sort import bubble_sort
class TestBubbleSort(unittest.TestCase):
def setUp(self):
self.unsorted_values = [random() for i in range(20)]
def test_bubble_sort_returns_an_array(self):
sorted_values = bubble_sort(self.unsorted_values)
assert isinstance(sorted_values, list)
def test_bubble_sort_correctly_sorts(self):
sorted_values = bubble_sort(self.unsorted_values)
self.assertEqual(sorted(self.unsorted_values), sorted_values)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
6444166
|
<gh_stars>1-10
#coding:utf8
import sys,os
import torch as t
from data import get_data
from model import PoetryModel
from torch import nn
from torch.autograd import Variable
from utils import Visualizer
import tqdm
from torchnet import meter
import ipdb
class Config(object):
    data_path = 'data/'  # directory holding the poetry text files
    pickle_path = 'tang.npz'  # preprocessed binary data file
    author = None  # only learn poems by a specific author
    constrain = None  # length constraint
    category = 'poet.tang'  # category: Tang poems or Song poems (poet.song)
    lr = 1e-3
    weight_decay = 1e-4
    use_gpu = True
    epoch = 20
    batch_size = 128
    maxlen = 125  # characters beyond this length are dropped; shorter poems are left-padded with spaces
    plot_every = 20  # visualize once every 20 batches
    # use_env = True # whether to use visdom
    env = 'poetry'  # visdom env
    max_gen_len = 200  # maximum length of a generated poem
    debug_file = '/tmp/debugp'
    model_path = None  # path to a pretrained model
    prefix_words = '细雨鱼儿出,微风燕子斜。'  # not part of the poem itself; used to steer the mood of the generated poem
    start_words = '闲云潭影日悠悠'  # opening of the poem
    acrostic = False  # whether to generate an acrostic poem
    model_prefix = 'checkpoints/tang'  # path prefix for saving the model
opt = Config()
def generate(model,start_words,ix2word,word2ix,prefix_words=None):
    '''
    Given a few starting characters, continue from them to generate a complete poem.
    start_words: e.g. u'春江潮水连海平'
    For example, with start_words set to 春江潮水连海平, a full poem is generated from it.
    '''
results = list(start_words)
start_word_len = len(start_words)
    # manually set the first token to <START>
input = Variable(t.Tensor([word2ix['<START>']]).view(1,1).long())
if opt.use_gpu:input=input.cuda()
hidden = None
if prefix_words:
for word in prefix_words:
output,hidden = model(input,hidden)
input = Variable(input.data.new([word2ix[word]])).view(1,1)
for i in range(opt.max_gen_len):
output,hidden = model(input,hidden)
if i<start_word_len:
w = results[i]
input = Variable(input.data.new([word2ix[w]])).view(1,1)
else:
top_index = output.data[0].topk(1)[1][0]
w = ix2word[top_index]
results.append(w)
input = Variable(input.data.new([top_index])).view(1,1)
if w=='<EOP>':
del results[-1]
break
return results
def gen_acrostic(model,start_words,ix2word,word2ix, prefix_words = None):
    '''
    Generate an acrostic poem, where the given characters start each line.
    start_words : u'深度学习'
    Generated output:
    深木通中岳,青苔半日脂。
    度山分地险,逆浪到南巴。
    学道兵犹毒,当时燕不移。
    习根通古岸,开镜出清羸。
    '''
results = []
start_word_len = len(start_words)
input = Variable(t.Tensor([word2ix['<START>']]).view(1,1).long())
if opt.use_gpu:input=input.cuda()
hidden = None
    index=0  # tracks how many of the acrostic head characters have been emitted
    # the previous word
pre_word='<START>'
if prefix_words:
for word in prefix_words:
output,hidden = model(input,hidden)
input = Variable(input.data.new([word2ix[word]])).view(1,1)
for i in range(opt.max_gen_len):
output,hidden = model(input,hidden)
top_index = output.data[0].topk(1)[1][0]
w = ix2word[top_index]
        if (pre_word in {u'。',u'!','<START>'} ):
            # when a sentence-ending mark is reached, the next acrostic head character is fed in
            if index==start_word_len:
                # stop once the poem already contains all of the acrostic head characters
                break
            else:
                # feed the acrostic head character into the model as input
                w = start_words[index]
                index+=1
                input = Variable(input.data.new([word2ix[w]])).view(1,1)
        else:
            # otherwise feed the previously predicted word back in as the next input
            input = Variable(input.data.new([word2ix[w]])).view(1,1)
results.append(w)
pre_word = w
return results
def train(**kwargs):
for k,v in kwargs.items():
setattr(opt,k,v)
vis = Visualizer(env=opt.env)
    # load the data
data,word2ix,ix2word = get_data(opt)
data = t.from_numpy(data)
dataloader = t.utils.data.DataLoader(data,
batch_size=opt.batch_size,
shuffle=True,
num_workers=1)
    # model definition
model = PoetryModel(len(word2ix), 128, 256)
optimizer = t.optim.Adam(model.parameters(), lr=opt.lr)
criterion = nn.CrossEntropyLoss()
if opt.model_path:
model.load_state_dict(t.load(opt.model_path))
if opt.use_gpu:
model.cuda()
criterion.cuda()
loss_meter = meter.AverageValueMeter()
for epoch in range(opt.epoch):
loss_meter.reset()
for ii,data_ in tqdm.tqdm(enumerate(dataloader)):
            # training
data_ = data_.long().transpose(1,0).contiguous()
if opt.use_gpu: data_ = data_.cuda()
optimizer.zero_grad()
input_,target = Variable(data_[:-1,:]),Variable(data_[1:,:])
output,_ = model(input_)
loss = criterion(output,target.view(-1))
loss.backward()
optimizer.step()
loss_meter.add(loss.data[0])
            # visualization
if (1+ii)%opt.plot_every==0:
if os.path.exists(opt.debug_file):
ipdb.set_trace()
vis.plot('loss',loss_meter.value()[0])
                # the original poems
poetrys=[ [ix2word[_word] for _word in data_[:,_iii]]
for _iii in range(data_.size(1))][:16]
vis.text('</br>'.join([''.join(poetry) for poetry in poetrys]),win=u'origin_poem')
gen_poetries = []
                # generate eight poems, each starting with one of these characters
for word in list(u'春江花月夜凉如水'):
gen_poetry = ''.join(generate(model,word,ix2word,word2ix))
gen_poetries.append(gen_poetry)
vis.text('</br>'.join([''.join(poetry) for poetry in gen_poetries]),win=u'gen_poem')
t.save(model.state_dict(),'%s_%s.pth' %(opt.model_prefix,epoch))
def gen(**kwargs):
    '''
    Command-line interface for generating poems.
    '''
for k,v in kwargs.items():
setattr(opt,k,v)
data,word2ix,ix2word = get_data(opt)
model = PoetryModel(len(word2ix), 128, 256);
map_location = lambda s,l:s
state_dict = t.load(opt.model_path,map_location=map_location)
model.load_state_dict(state_dict)
if opt.use_gpu:
model.cuda()
if sys.version_info.major == 3:
if opt.start_words.isprintable():
start_words = opt.start_words
prefix_words = opt.prefix_words if opt.prefix_words else None
else:
start_words = opt.start_words.encode('ascii', 'surrogateescape').decode('utf8')
prefix_words = opt.prefix_words.encode('ascii', 'surrogateescape').decode('utf8') if opt.prefix_words else None
else:
start_words = opt.start_words.decode('utf8')
prefix_words = opt.prefix_words.decode('utf8') if opt.prefix_words else None
start_words= start_words.replace(',',u',')\
.replace('.',u'。')\
.replace('?',u'?')
gen_poetry = gen_acrostic if opt.acrostic else generate
result = gen_poetry(model,start_words,ix2word,word2ix,prefix_words)
print(''.join(result))
if __name__ == '__main__':
import fire
fire.Fire()
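# Invocation sketch via python-fire (the script name, checkpoint path and arguments are
# illustrative):
#
#     python main.py train --plot-every=150 --use-gpu=True
#     python main.py gen --model-path='checkpoints/tang_19.pth' --start-words='深度学习' --acrostic=True
#
# fire.Fire() exposes the module's top-level functions (train, gen) as sub-commands and
# forwards keyword arguments into Config through the **kwargs overrides above.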
|
StarcoderdataPython
|
146386
|
<gh_stars>1-10
import nmap
# Create the scanner object
scanner = nmap.PortScanner()
# Input & Cast
ip_addr = input("Target IP : ")
type(ip_addr)
# Display Options
resp = input("""\n Options :
1)SYN ACK Scan
2)Comprehensive Scan \n""")
# Port scan: SYN
if resp == '1':
print("Nmap Version: ", scanner.nmap_version())
scanner.scan(ip_addr, '1-1024', '-v -sS')
print(scanner.scaninfo())
print("Ip Status: ", scanner[ip_addr].state())
print(scanner[ip_addr].all_protocols())
print("Open Ports: ", scanner[ip_addr]['tcp'].keys())
# Port scan: SYN; detection of services, versions, OS, and aggressive scan
elif resp == '2':
print("Nmap Version: ", scanner.nmap_version())
scanner.scan(ip_addr, '1-1024', '-v -sS -sV -sC -A -O')
print(scanner.scaninfo())
print("Ip Status: ", scanner[ip_addr].state())
print(scanner[ip_addr].all_protocols())
print("Open Ports: ", scanner[ip_addr]['tcp'].keys())
else:
print("Please enter a valid option")
|
StarcoderdataPython
|
3523464
|
<filename>research/cv/stgcn/src/model/metric.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
stgcn network with loss.
"""
import mindspore.nn as nn
import mindspore.ops as P
class LossCellWithNetwork(nn.Cell):
"""STGCN loss."""
def __init__(self, network):
super(LossCellWithNetwork, self).__init__()
self.loss = nn.MSELoss()
self.network = network
self.reshape = P.Reshape()
def construct(self, x, label):
x = self.network(x)
x = self.reshape(x, (len(x), -1))
label = self.reshape(label, (len(label), -1))
STGCN_loss = self.loss(x, label)
return STGCN_loss
|
StarcoderdataPython
|
6422825
|
'''
Test alb template creation, resource creation
and resource functionality for Anchore alb
'''
import os
import unittest
import pytest
from anchore import alb, main
from tests.mocks import schema
class TestALB(unittest.TestCase):
def setUp(self):
self.template = alb.ALBTemplate()
def test_add_descriptions(self):
template_file = self.template.add_descriptions("foobar")
self.assertEqual(template_file.to_yaml(), schema.MOCKED_DESCRIPTION)
def test_add_version(self):
template_file = self.template.add_version("2010-09-09")
self.assertEqual(template_file.to_yaml(), schema.MOCKED_VERSION)
def test_create_alb_template(self):
test_engine = main.AnchoreEngine()
self.assertEqual(test_engine.create_alb_template(), schema.mocked_alb_template())
def tearDown(self):
self.template = alb.ALBTemplate()
|
StarcoderdataPython
|
3200226
|
<reponame>epoch8/datapipe<filename>tests/test_core_steps2.py<gh_stars>1-10
# Ex test_compute
# from typing import cast
# import pytest
import time
import pandas as pd
from sqlalchemy import Column
from sqlalchemy.sql.sqltypes import Integer
from datapipe.store.database import TableStoreDB, MetaKey
from datapipe.datatable import DataStore
from datapipe.core_steps import do_batch_generate, do_full_batch_transform, BatchTransformStep
from datapipe.types import ChangeList, IndexDF
from datapipe.run_config import RunConfig
from .util import assert_datatable_equal, assert_df_equal
TEST_SCHEMA1 = [
Column('item_id', Integer, primary_key=True),
Column('pipeline_id', Integer, primary_key=True),
Column('a', Integer),
]
TEST_SCHEMA2 = [
Column('item_id', Integer, primary_key=True),
Column('a', Integer),
]
PRODUCTS_SCHEMA = [
Column('product_id', Integer, primary_key=True),
Column('pipeline_id', Integer, primary_key=True),
Column('b', Integer),
]
ITEMS_SCHEMA = [
Column('item_id', Integer, primary_key=True),
Column('pipeline_id', Integer, primary_key=True),
Column('product_id', Integer, MetaKey()),
Column('a', Integer),
]
TEST_DF1_1 = pd.DataFrame(
{
'item_id': range(10),
'pipeline_id': [i // 5 for i in range(10)],
'a': range(10),
},
)
TEST_DF1_2 = pd.DataFrame(
{
'item_id': list(range(5)) * 2,
'pipeline_id': [i // 5 for i in range(10)],
'a': range(10),
},
)
PRODUCTS_DF = pd.DataFrame(
{
'product_id': list(range(2)),
'pipeline_id': list(range(2)),
'b': range(10, 12),
}
)
ITEMS_DF = pd.DataFrame(
{
'item_id': list(range(5)) * 2,
'pipeline_id': list(range(2)) * 5,
'product_id': list(range(2)) * 5,
'a': range(10),
}
)
def test_batch_transform(dbconn):
ds = DataStore(dbconn)
tbl1 = ds.create_table(
'tbl1',
table_store=TableStoreDB(dbconn, 'tbl1_data', TEST_SCHEMA1, True)
)
tbl2 = ds.create_table(
'tbl2',
table_store=TableStoreDB(dbconn, 'tbl2_data', TEST_SCHEMA1, True)
)
tbl1.store_chunk(TEST_DF1_1, now=0)
do_full_batch_transform(
func=lambda df: df,
ds=ds,
input_dts=[tbl1],
output_dts=[tbl2],
)
meta_df = tbl2.get_metadata()
update_ts = max(meta_df['update_ts'])
process_ts = max(meta_df['process_ts'])
time.sleep(0.1)
do_full_batch_transform(
func=lambda df: df,
ds=ds,
input_dts=[tbl1],
output_dts=[tbl2],
)
meta_df = tbl2.get_metadata()
assert(all(meta_df['update_ts'] == update_ts))
assert(all(meta_df['process_ts'] == process_ts))
def test_batch_transform_with_filter(dbconn):
ds = DataStore(dbconn)
tbl1 = ds.create_table(
'tbl1',
table_store=TableStoreDB(dbconn, 'tbl1_data', TEST_SCHEMA1, True)
)
tbl2 = ds.create_table(
'tbl2',
table_store=TableStoreDB(dbconn, 'tbl2_data', TEST_SCHEMA1, True)
)
tbl1.store_chunk(TEST_DF1_1, now=0)
do_full_batch_transform(
func=lambda df: df,
ds=ds,
input_dts=[tbl1],
output_dts=[tbl2],
run_config=RunConfig(filters={'pipeline_id': 0})
)
assert_datatable_equal(tbl2, TEST_DF1_1.query('pipeline_id == 0'))
def test_batch_transform_with_filter_not_in_transform_index(dbconn):
ds = DataStore(dbconn)
tbl1 = ds.create_table(
'tbl1',
table_store=TableStoreDB(dbconn, 'tbl1_data', TEST_SCHEMA1, True)
)
tbl2 = ds.create_table(
'tbl2',
table_store=TableStoreDB(dbconn, 'tbl2_data', TEST_SCHEMA2, True)
)
tbl1.store_chunk(TEST_DF1_2, now=0)
do_full_batch_transform(
func=lambda df: df[['item_id', 'a']],
ds=ds,
input_dts=[tbl1],
output_dts=[tbl2],
run_config=RunConfig(filters={'pipeline_id': 0}),
)
assert_datatable_equal(tbl2, TEST_DF1_2.query('pipeline_id == 0')[['item_id', 'a']])
def test_batch_transform_with_dt_on_input_and_output(dbconn):
ds = DataStore(dbconn)
tbl1 = ds.create_table(
'tbl1',
table_store=TableStoreDB(dbconn, 'tbl1_data', TEST_SCHEMA1, True)
)
tbl2 = ds.create_table(
'tbl2',
table_store=TableStoreDB(dbconn, 'tbl2_data', TEST_SCHEMA1, True)
)
df2 = TEST_DF1_1.loc[range(3, 8)]
df2['a'] = df2['a'].apply(lambda x: x + 10)
tbl1.store_chunk(TEST_DF1_1, now=0)
tbl2.store_chunk(df2, now=0)
def update_df(df1: pd.DataFrame, df2: pd.DataFrame):
df1 = df1.set_index("item_id")
df2 = df2.set_index("item_id")
df1.update(df2)
return df1.reset_index()
do_full_batch_transform(
func=update_df,
ds=ds,
input_dts=[tbl1, tbl2],
output_dts=[tbl2],
)
df_res = TEST_DF1_1.copy()
df_res.update(df2)
assert_datatable_equal(tbl2, df_res)
def test_gen_with_filter(dbconn):
ds = DataStore(dbconn)
tbl = ds.create_table(
'tbl',
table_store=TableStoreDB(dbconn, 'tbl_data', TEST_SCHEMA1, True)
)
tbl.store_chunk(TEST_DF1_1, now=0)
def gen_func():
yield TEST_DF1_1.query('pipeline_id == 0 and item_id == 0')
do_batch_generate(
func=gen_func,
ds=ds,
output_dts=[tbl],
run_config=RunConfig(filters={'pipeline_id': 0})
)
assert_datatable_equal(tbl, TEST_DF1_1.query('(pipeline_id == 0 and item_id == 0) or pipeline_id == 1'))
def test_transform_with_changelist(dbconn):
ds = DataStore(dbconn)
tbl1 = ds.create_table(
'tbl1',
table_store=TableStoreDB(dbconn, 'tbl1_data', TEST_SCHEMA1, True)
)
tbl2 = ds.create_table(
'tbl2',
table_store=TableStoreDB(dbconn, 'tbl2_data', TEST_SCHEMA1, True)
)
tbl1.store_chunk(TEST_DF1_1, now=0)
def func(df):
return df
step = BatchTransformStep(
'test',
func=func,
input_dts=[tbl1],
output_dts=[tbl2]
)
change_list = ChangeList()
idx_keys = ['item_id', 'pipeline_id']
changes_df = TEST_DF1_1.loc[[0, 1, 2]]
changes_idx = IndexDF(changes_df[idx_keys])
change_list.append('tbl1', changes_idx)
next_change_list = step.run_changelist(ds, change_list)
assert_datatable_equal(tbl2, changes_df)
assert list(next_change_list.changes.keys()) == ['tbl2']
assert_df_equal(next_change_list.changes['tbl2'], changes_idx, index_cols=idx_keys)
def test_batch_transform_with_entity(dbconn):
ds = DataStore(dbconn)
products = ds.create_table(
'products',
table_store=TableStoreDB(dbconn, 'products_data', PRODUCTS_SCHEMA, True)
)
items = ds.create_table(
'items',
table_store=TableStoreDB(dbconn, 'items_data', ITEMS_SCHEMA, True)
)
items2 = ds.create_table(
'items2',
table_store=TableStoreDB(dbconn, 'items2_data', ITEMS_SCHEMA, True)
)
products.store_chunk(PRODUCTS_DF, now=0)
items.store_chunk(ITEMS_DF, now=0)
def update_df(products: pd.DataFrame, items: pd.DataFrame):
merged_df = pd.merge(items, products, on=['product_id', 'pipeline_id'])
merged_df['a'] = merged_df.apply(lambda x: x['a'] + x['b'], axis=1)
return merged_df[['item_id', 'pipeline_id', 'product_id', 'a']]
do_full_batch_transform(
func=update_df,
ds=ds,
input_dts=[products, items],
output_dts=[items2],
)
merged_df = pd.merge(ITEMS_DF, PRODUCTS_DF, on=['product_id', 'pipeline_id'])
merged_df['a'] = merged_df.apply(lambda x: x['a'] + x['b'], axis=1)
items2_df = merged_df[['item_id', 'pipeline_id', 'product_id', 'a']]
assert_df_equal(
items2.get_data(),
items2_df,
index_cols=['item_id', 'pipeline_id']
)
|
StarcoderdataPython
|
11391805
|
# Generated by Django 2.0.6 on 2018-07-02 10:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelchimp', '0023_machinelearningmodel_epoch_durations'),
]
operations = [
migrations.AddField(
model_name='machinelearningmodel',
name='experiment_end',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='machinelearningmodel',
name='experiment_start',
field=models.DateTimeField(null=True),
),
]
|
StarcoderdataPython
|
38212
|
# ==============================================================================
# Copyright 2019 - <NAME>
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" OpenAI Gym integration modules """
import warnings
from gym.envs.registration import register
from diplomacy_research.models.gym.wrappers import AutoDraw, LimitNumberYears, LoopDetection, SetInitialState, \
AssignPlayers, RandomizePlayers, SetPlayerSeed, SaveGame
# Ignore specific warnings
warnings.filterwarnings('ignore', message='Parameters to load are deprecated')
register(
id='DiplomacyEnv-v0',
entry_point='diplomacy_research.models.gym.environment:DiplomacyEnv')
|
StarcoderdataPython
|
9695289
|
<filename>src/marmo/report/blueprints/reportitem.py
#
# Generated with ReportItemBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
class ReportItemBlueprint(Blueprint):
""""""
def __init__(self, name="ReportItem", package_path="marmo/report", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
|
StarcoderdataPython
|
11385453
|
from typing import cast
from pydantic import ValidationError
from werkzeug.exceptions import InternalServerError
from cibo import Blueprint, ErrorContext
from ..exceptions import AuthException
api = Blueprint("api", __name__, openapi_tag="API", tag_description="description of API")
@api.errorhandler(ValidationError)
def handle_validation_error(e: ValidationError):
return ErrorContext().error(data=dict(errors=e.errors()))
@api.errorhandler(AuthException)
def handle_auth_exception(e: AuthException):
return ErrorContext().error(e.msg, e.code)
@api.errorhandler(InternalServerError)
def handle_internal_server_error(e: InternalServerError):
return ErrorContext().error("服务器错误", cast(int, e.code))
def _binding_route_rule():
from . import echo_handler as _
from . import health_check_handler as _
from . import user_handler as _
_binding_route_rule()
|
StarcoderdataPython
|
3372243
|
# Copyright (c) 2021 <NAME>
from pywayland.server import Display, Signal
from wlroots import ffi, PtrHasData, lib
class GammaControlManagerV1(PtrHasData):
def __init__(self, display: Display) -> None:
"""Creates a wlr_gamma_control_manager_v1"""
self._ptr = lib.wlr_gamma_control_manager_v1_create(display._ptr)
self.destroy_event = Signal(ptr=ffi.addressof(self._ptr.events.destroy))
|
StarcoderdataPython
|
6530184
|
<filename>Genclass/__init__.py<gh_stars>1-10
name = 'Genclass'
|
StarcoderdataPython
|
5038028
|
from chainer.backends import cuda
import cupy
import numpy as np
from chainerkfac.optimizers.cholesky_inverse import inverse
import warnings
PI_TYPE_TRACENORM = 'tracenorm'
def get_diagval(link, attrname, damping):
param = getattr(link, attrname, None)
if param is None:
return
r = getattr(param, 'l2_coef', 0.0)
r = max(r, damping)
return r
def compute_pi_tracenorm(lcov, rcov):
"""Compute pi using tracenorm
Computes the scalar constant pi for Tikhonov regularization/damping.
$\pi = \sqrt{ (trace(A) / dim(A)) / (trace(B) / dim(B)) }$
See section 6.3 of https://arxiv.org/pdf/1503.05671.pdf for details.
""" # NOQA
def compute_trace(cov):
if cov.ndim == 1:
return cov.sum()
else:
return cov.trace()
xp = cuda.get_array_module([lcov, rcov])
with cuda.get_device_from_array([lcov, rcov]):
lnorm = compute_trace(lcov) * rcov.shape[0]
rnorm = compute_trace(rcov) * lcov.shape[0]
pi = xp.sqrt(lnorm / rnorm, dtype=xp.float32)
return pi
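# --- Illustrative sketch (not part of the original module) ------------------
# A tiny numeric check of compute_pi_tracenorm using plain numpy arrays
# (chainer's cuda helpers fall back to numpy when no GPU arrays are involved).
# The matrices below are made-up demonstration values only.
if __name__ == '__main__':
    _lcov = np.diag([1.0, 2.0, 3.0]).astype(np.float32)  # trace 6, dim 3
    _rcov = np.diag([4.0, 4.0]).astype(np.float32)       # trace 8, dim 2
    # pi = sqrt((6 / 3) / (8 / 2)) = sqrt(0.5) ~= 0.707
    print(compute_pi_tracenorm(_lcov, _rcov))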
def compute_pi(lcov, rcov, pi_type=PI_TYPE_TRACENORM):
if pi_type == PI_TYPE_TRACENORM:
return compute_pi_tracenorm(lcov, rcov)
else:
return 1.0
class FisherBlock(object):
"""Base class for Fisher-block.
Args:
linkname (string): Name to identify this FisherBlock object (do not
have to be identical, only for debug use).
link (~chainer.Link): Chainer Link object corresponding to this
FisherBlock object.
cov_ema_decay (float): Decay rate for the exponential moving average of
the Kronecker-factors.
damping (float): Damping value added before taking the inverse of the
Kronecker-factors.
communicate_after_forward (bool): Call All-Reduce after forward
(before backprop).
pi_type (float): Type of the norm used to compute pi.
use_cholesky (bool): Use Cholesky decomposition to compute the inverse of
the Kronecker-factors.
stab_coeff (float): Coefficient which is multiplied to the diagonal
values for the BatchNormalization layer.
loss_scale (float): Scaling value to avoid over/underflow in
low-precision communication.
"""
def __init__(
self,
linkname,
link,
cov_ema_decay,
damping,
communicate_after_forward=False,
pi_type=PI_TYPE_TRACENORM,
use_cholesky=True,
stab_coeff=64.0,
loss_scale=None,
):
self._link = link
self._linkname = linkname
self._linkclass = link.__class__
self._communicate_after_forward = communicate_after_forward
self._pi_type = pi_type
self._use_cholesky = use_cholesky
self._loss_scale = loss_scale
self.cov_ema_decay = cov_ema_decay
self.damping = damping
self.stab_coeff = stab_coeff
self.xp = link.xp
self.covs = None
self.invs = None
self.cov_emas = None
self.diagonalize = False
@property
def linkname(self):
# Used for distributed K-FAC
return self._linkname
@property
def link(self):
# Used for distributed K-FAC
return self._link
@property
def funcclass(self):
raise NotImplementedError
@property
def cov_forward(self):
raise NotImplementedError
@property
def cov_backward(self):
raise NotImplementedError
@property
def inv_forward(self):
raise NotImplementedError
@property
def inv_backward(self):
raise NotImplementedError
def debug(self):
return getattr(self._link, 'debug', False)
def is_mine(self, func, in_data, out_grad_data=None):
raise NotImplementedError
def forward_postprocess(self, func, in_data):
raise NotImplementedError
def backward_preprocess(self, func, in_data, out_grad_data):
raise NotImplementedError
def check_attr(self, name):
if getattr(self, name) is None:
raise RuntimeError(
'{} is None:\nlinkname: {}\nlinkclass: {}'.format(
name, self._linkname, self._linkclass))
def get_diagval(self, attr):
return get_diagval(self._link, attr, self.damping)
def update_cov_emas(self):
self.check_attr('covs')
r = self.cov_ema_decay
if r == 1.0 or self.cov_emas is None:
            # Copy so that the covariances are not corrupted in inverse (inverse works in-place)
self.cov_emas = self.covs.copy()
else:
self.cov_emas = [r*cov + (1 - r)*cov_ema for cov, cov_ema
in zip(self.covs, self.cov_emas)]
def update_invs(self):
self.check_attr('cov_emas')
xp = self.xp
self.invs = []
for cov_ema, diagvals in zip(self.cov_emas, self.get_diagvals()):
if cov_ema.ndim == 1:
inv = 1 / (cov_ema + diagvals)
else:
                _cov_ema = cov_ema.copy()
                xp.fill_diagonal(_cov_ema, xp.diagonal(_cov_ema) + diagvals)
                inv = inverse(_cov_ema, self._use_cholesky)
self.invs.append(inv)
def get_diagvals(self):
raise NotImplementedError
def update_kfgrads(self):
raise NotImplementedError
def extract_attr_from_params(self, attrname, triangular=False):
"""Extracts arrays from all ``Parameter``s
"""
arrays = []
for _, param in sorted(self.link.namedparams()):
x = getattr(param, attrname, None)
if x is not None:
arrays.append((x, triangular))
return arrays
def extract_for_reduce_scatter_v_after_forward(self):
if self._communicate_after_forward:
triangular = True
return [(self.cov_forward, triangular)]
else:
return []
def extract_for_reduce_scatter_v(self):
arrays = self.extract_attr_from_params('grad')
if not self._communicate_after_forward:
triangular = True
arrays.append((self.cov_forward, triangular))
triangular = True
arrays.append((self.cov_backward, triangular))
return arrays
def extract_for_all_gather_v(self, target='kfgrad'):
arrays = self.extract_attr_from_params(target)
triangular = False
return arrays
|
StarcoderdataPython
|
5104217
|
version = "0.25.0" # pylint:disable=invalid-name
|
StarcoderdataPython
|
1996495
|
<reponame>webkom/committee-admissions
from rest_framework import authentication
class SessionAuthentication(authentication.SessionAuthentication):
"""
    This class is needed because REST Framework's default SessionAuthentication never returns
    401s, since it cannot fill the WWW-Authenticate header with a valid value in the 401
    response. As a result, we cannot distinguish calls that are unauthenticated (401 unauthorized)
    from calls for which the user does not have permission (403 forbidden).
    See https://github.com/encode/django-rest-framework/issues/5968
    We override the authenticate_header method of SessionAuthentication so that a value for the
    WWW-Authenticate header can be retrieved and the response code is automatically set to
    401 for unauthenticated requests.
"""
def authenticate_header(self, request):
return "Session"
|
StarcoderdataPython
|
4880638
|
<reponame>SafonovMikhail/python_000577<filename>000468BookBRIGG/000468_01_06_01_ex01_for_range_20190528.py
for x in range(0, 5):
print('привет')
input()
|
StarcoderdataPython
|
381448
|
import argparse
from glob import glob
from os import path
parser = argparse.ArgumentParser()
parser.add_argument("outfile")
parser.add_argument("directory")
parser.add_argument("--split", "-s", default='.atsp')
args = parser.parse_args()
target = args.outfile
directory = args.directory
split = args.split
files = glob(f"{directory}/*{split}.?")
stuff = []
for f in files:
with open(f) as handle:
lines = [l.strip() for l in handle]
if len(stuff)==0:
stuff.append(lines[0] + ",graph,algo")
for line in lines[1:]:
if "done" in line:
continue
basename = path.basename(f)
graph = basename.split(split)[0]
if basename.endswith("c"):
algo = "christofides"
else:
algo = "tree-doubling"
line += f",{graph},{algo}"
stuff.append(line)
with open(target, "w") as handle:
towrite = [l+"\n" for l in stuff]
handle.writelines(towrite)
|
StarcoderdataPython
|
3467105
|
<reponame>dwyer/folklorist<filename>ballads/views.py
import logging
import os
import urllib
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from django.core.paginator import Paginator
from models import *
from hacks import *
# TODO put this in /settings.py and make it an absolute path
TEMPLATES_DIR = 'templates'
class _RequestHandler(webapp.RequestHandler):
template_dir = 'templates'
def write(self, filename='base.html'):
path = os.path.join(self.template_dir, filename)
self.response.out.write(template.render(path, dict(self=self)))
def not_found(self):
self.error(404)
self.message = '404: Page not found.'
self.title = 'Page not found.'
self.write('error.html')
class HomePage(webapp.RequestHandler):
@redirector
def get(self):
path = os.path.join(TEMPLATES_DIR, 'home.html')
context = {'vols': alphabet()}
self.response.out.write(template.render(path, context))
class SearchPage(webapp.RequestHandler):
limit = 30
@redirector
def get(self):
self.query = self.request.get('q')
self.page = int(self.request.get('p', 1))
self.start = int(self.request.get('start', 1))
info = None
error = None
if self.query:
self.query = self.query.strip()
self.title = 'Search for %s - Folklorist' % self.query
self.results = BalladIndex.all(keys_only=True)
for word in query_to_words(self.query):
self.results.filter('index =', word.lower())
#self.num_results = '?'
self.num_results = self.results.count()
offset = (self.page-1) * self.limit
self.results = self.results.fetch(self.limit, offset)
self.results = db.get(self.results)
#self.results = [child.parent() for child in self.results]
if self.results:
# pagination
self.pages = []
if self.page > 1:
self.pages.append(
'<a href="/search?q=%s&p=%d">< Previous</a>' % (
self.query, self.page-1))
if len(self.results) == self.limit:
self.pages.append(
'<a href="/search?q=%s&p=%d">Next ></a>' % (
self.query, self.page+1))
self.pages = ' · '.join(self.pages)
# love!
self.start = offset + 1
self.finish = offset + self.limit
#self.write('search.html')
path = os.path.join(TEMPLATES_DIR, 'search.html')
context = {'self': self, 'vols': alphabet(), 'error': error, 'info': info}
self.response.out.write(template.render(path, context))
class BalladPage(_RequestHandler):
@redirector
def get(self, title):
title = urllib.unquote(title.replace('_', ' '))
query = BalladName.all(keys_only=True).filter('title =', title).get()
# get ballad
self.ballad = None
if query:
key = query.parent()
self.ballad = Ballad.get(key)
# got ballad
if self.ballad:
self.title = self.ballad.title()
self.supptrad = SuppTradFile.get_by_key_name(self.ballad.file, self.ballad)
else: # handle broken links
self.ballad = Ballad.all().filter('name =', title).get()
if self.ballad:
self.redirect(self.ballad.url(), permanent=True)
else:
self.not_found()
return
self.write('ballad.html')
class SitemapPage(webapp.RequestHandler):
@redirector
def get(self, start):
memkey = 'sitemap_%s' % start
output = memcache.get(memkey)
if output is None:
list = BalladName.all().order('name')
list.filter('name >=', start)
list.filter('name <', start+chr(127))
output = template.render('templates/sitemap.xml', dict(list=list))
memcache.set(memkey, output)
self.response.headers['content-type'] = 'text/xml'
self.response.out.write(output)
class ErrorPage(_RequestHandler):
url = '/.*'
def get(self):
self.not_found()
|
StarcoderdataPython
|
11230820
|
<filename>tests/test_NeqSim.py
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 3 22:24:08 2019
@author: ESOL
"""
from neqsim.neqsimpython import jNeqSim
def test_Viscosity():
thermoSystem = jNeqSim.thermo.system.SystemSrkEos(280.0, 10.0)
thermoSystem.addComponent("methane", 10.0)
thermoSystem.addComponent("water", 4.0)
thermoOps = jNeqSim.thermodynamicOperations.ThermodynamicOperations(
thermoSystem)
thermoOps.TPflash()
gasEnthalpy = thermoSystem.getPhase(0).getEnthalpy()
assert abs(1079.4821290144278 - gasEnthalpy) < 1e-10
thermoSystem.initPhysicalProperties("Viscosity")
gasViscosity = thermoSystem.getPhase(0).getViscosity("kg/msec")
assert abs(1.0760998263783299e-05 - gasViscosity) < 1e-10
|
StarcoderdataPython
|
29494
|
from django.shortcuts import render
# Python functions - user is going to request an url
# Create your views here.
from django.http import HttpResponse
def index(request):
return HttpResponse("<h1> This is the music app homepage</h1>")
|
StarcoderdataPython
|
3322627
|
#!/usr/bin/env python3
import boto3
from botocore.exceptions import ClientError
# from botocore.errorfactory import BadRequestException
import os
import logging
# logger = logging.getLogger()
services = {
"access-analyzer.amazonaws.com": "IAM Access Analyzer",
# "guardduty.amazonaws.com": "AWS GuardDuty", # apparently this isn't a proper
}
def main(args, logger):
'''Executes the Primary Logic of the Fast Fix'''
# If they specify a profile use it. Otherwise do the normal thing
if args.profile:
session = boto3.Session(profile_name=args.profile)
else:
session = boto3.Session()
org_client = session.client("organizations")
for service, description in services.items():
response = org_client.list_delegated_administrators(ServicePrincipal=service)
if len(response['DelegatedAdministrators']) == 1:
if response['DelegatedAdministrators'][0]['Id'] == args.accountId:
logger.info(f"{args.accountId} is already the delegated admin for {description}")
else:
logger.error(f"{response['DelegatedAdministrators'][0]['Id']} is the delegated admin for {service}. Not performing the update")
elif len(response['DelegatedAdministrators']) > 1:
logger.error(f"Multiple delegated admin accounts for {service}. Cannot safely proceed.")
elif args.actually_do_it is True:
# Safe to Proceed
logger.info(f"Enabling {description} Delegation to {args.accountId}")
response = org_client.register_delegated_administrator(
AccountId=args.accountId,
ServicePrincipal=service
)
else:
logger.info(f"Would enable {description} Delegation to {args.accountId}")
def do_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="print debugging info", action='store_true')
parser.add_argument("--error", help="print error info only", action='store_true')
parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true')
parser.add_argument("--region", help="Only Process Specified Region")
parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)")
parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true')
parser.add_argument("--delegated-admin", dest='accountId', help="Delegate access to this account id", required=True)
args = parser.parse_args()
return(args)
if __name__ == '__main__':
args = do_args()
# Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
# create console handler and set level to debug
logger = logging.getLogger('delegated-admin')
ch = logging.StreamHandler()
if args.debug:
logger.setLevel(logging.DEBUG)
elif args.error:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.INFO)
# Silence Boto3 & Friends
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
# create formatter
if args.timestamp:
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
else:
formatter = logging.Formatter('%(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
try:
main(args, logger)
except KeyboardInterrupt:
exit(1)
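# Usage sketch (the script name and account id below are placeholders):
#   python delegate_admin.py --delegated-admin 123456789012 --actually-do-it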
|
StarcoderdataPython
|
8018825
|
from .Model import Model
from .TransE import TransE
from .TransH import TransH
from .TransD import TransD
from .TransR import TransR
from .RESCAL import RESCAL
from .DistMult import DistMult
from .HolE import HolE
from .ComplEx import ComplEx
from .Analogy import Analogy
|
StarcoderdataPython
|
56056
|
import socket
print(" _____ _ _____ _ _ ")
print(" | __ \ | | / ____| (_) | ")
print(" | | | | __ _ _ __| |_ _____| (___ ___ ___ _ _ _ __ _| |_ _ _ ")
print(" | | | |/ _` | '__| __|______\___ \ / _ \/ __| | | | '__| | __| | | |")
print(" | |__| | (_| | | | |_ ____) | __/ (__| |_| | | | | |_| |_| |")
print(" |_____/ \__,_|_| \__| |_____/ \___|\___|\__,_|_| |_|\__|\__, |")
print(" __/ |")
print(" www.hc-security.com.mx by:Equinockx |___/ ")
print(" ")
print("Ingresa la Url:")
url = input()
try:
print("---" * 20)
print(f"La URL ingresada es: {url}")
print("Nombre del Dominio completo: \n" + socket.getfqdn(url))
print("Nombre de Host a direccion IP: \n" + socket.gethostbyname(url))
print("Nombre de host para extender la dirección IP: \n" + str(socket.gethostbyname_ex(url)))
print("Host de solicitud: \n" + socket.gethostname())
print("---" * 20)
except Exception as err:
print("Error" + str(err))
|
StarcoderdataPython
|
160507
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Steps for behavioral style tests are defined in this module.
Each step is defined by the string decorating it.
This string is used to call the step in "*.feature" file.
"""
from __future__ import unicode_literals
import pip
import pexpect
import os
import re
from behave import given, when, then
@given('we have pgcli installed')
def step_install_cli(_):
"""
Check that pgcli is in installed modules.
"""
dists = set([di.key for di in pip.get_installed_distributions()])
assert 'pgcli' in dists
@when('we run pgcli')
def step_run_cli(context):
"""
Run the process using pexpect.
"""
context.cli = pexpect.spawnu('pgcli')
context.exit_sent = False
@when('we wait for prompt')
def step_wait_prompt(context):
"""
Make sure prompt is displayed.
"""
_expect_exact(context, '{0}> '.format(context.conf['dbname']), timeout=5)
@when('we send "ctrl + d"')
def step_ctrl_d(context):
"""
Send Ctrl + D to hopefully exit.
"""
context.cli.sendcontrol('d')
context.exit_sent = True
@when('we send "\?" command')
def step_send_help(context):
"""
Send \? to see help.
"""
context.cli.sendline('\?')
@when('we create database')
def step_db_create(context):
"""
Send create database.
"""
context.cli.sendline('create database {0};'.format(
context.conf['dbname_tmp']))
context.response = {
'database_name': context.conf['dbname_tmp']
}
@when('we drop database')
def step_db_drop(context):
"""
Send drop database.
"""
context.cli.sendline('drop database {0};'.format(
context.conf['dbname_tmp']))
@when('we create table')
def step_create_table(context):
"""
Send create table.
"""
context.cli.sendline('create table a(x text);')
@when('we insert into table')
def step_insert_into_table(context):
"""
Send insert into table.
"""
context.cli.sendline('''insert into a(x) values('xxx');''')
@when('we update table')
def step_update_table(context):
"""
Send insert into table.
"""
context.cli.sendline('''update a set x = 'yyy' where x = 'xxx';''')
@when('we select from table')
def step_select_from_table(context):
"""
Send select from table.
"""
context.cli.sendline('select * from a;')
@when('we delete from table')
def step_delete_from_table(context):
"""
    Send delete from table.
"""
context.cli.sendline('''delete from a where x = 'yyy';''')
@when('we drop table')
def step_drop_table(context):
"""
Send drop table.
"""
context.cli.sendline('drop table a;')
@when('we connect to test database')
def step_db_connect_test(context):
"""
Send connect to database.
"""
db_name = context.conf['dbname']
context.cli.sendline('\connect {0}'.format(db_name))
@when('we start external editor providing a file name')
def step_edit_file(context):
"""
Edit file with external editor.
"""
context.editor_file_name = 'test_file_{0}.sql'.format(context.conf['vi'])
if os.path.exists(context.editor_file_name):
os.remove(context.editor_file_name)
context.cli.sendline('\e {0}'.format(context.editor_file_name))
_expect_exact(context, 'nano', timeout=2)
@when('we type sql in the editor')
def step_edit_type_sql(context):
context.cli.sendline('select * from abc')
# Write the file.
context.cli.sendcontrol('o')
# Confirm file name sending "enter".
context.cli.sendcontrol('m')
@when('we exit the editor')
def step_edit_quit(context):
context.cli.sendcontrol('x')
@then('we see the sql in prompt')
def step_edit_done_sql(context):
_expect_exact(context, 'select * from abc', timeout=2)
# Cleanup the command line.
context.cli.sendcontrol('u')
# Cleanup the edited file.
if context.editor_file_name and os.path.exists(context.editor_file_name):
os.remove(context.editor_file_name)
@when('we connect to postgres')
def step_db_connect_postgres(context):
"""
Send connect to database.
"""
context.cli.sendline('\connect postgres')
@when('we refresh completions')
def step_refresh_completions(context):
"""
Send refresh command.
"""
context.cli.sendline('\\refresh')
@then('pgcli exits')
def step_wait_exit(context):
"""
Make sure the cli exits.
"""
_expect_exact(context, pexpect.EOF, timeout=5)
@then('we see pgcli prompt')
def step_see_prompt(context):
"""
Wait to see the prompt.
"""
_expect_exact(context, '{0}> '.format(context.conf['dbname']), timeout=5)
@then('we see help output')
def step_see_help(context):
for expected_line in context.fixture_data['help_commands.txt']:
_expect_exact(context, expected_line, timeout=1)
@then('we see database created')
def step_see_db_created(context):
"""
Wait to see create database output.
"""
_expect_exact(context, 'CREATE DATABASE', timeout=2)
@then('we see database dropped')
def step_see_db_dropped(context):
"""
Wait to see drop database output.
"""
_expect_exact(context, 'DROP DATABASE', timeout=2)
@then('we see database connected')
def step_see_db_connected(context):
"""
Wait to see drop database output.
"""
_expect_exact(context, 'You are now connected to database', timeout=2)
@then('we see table created')
def step_see_table_created(context):
"""
Wait to see create table output.
"""
_expect_exact(context, 'CREATE TABLE', timeout=2)
@then('we see record inserted')
def step_see_record_inserted(context):
"""
Wait to see insert output.
"""
_expect_exact(context, 'INSERT 0 1', timeout=2)
@then('we see record updated')
def step_see_record_updated(context):
"""
Wait to see update output.
"""
_expect_exact(context, 'UPDATE 1', timeout=2)
@then('we see data selected')
def step_see_data_selected(context):
"""
Wait to see select output.
"""
_expect_exact(context, 'yyy', timeout=1)
_expect_exact(context, 'SELECT 1', timeout=1)
@then('we see record deleted')
def step_see_data_deleted(context):
"""
Wait to see delete output.
"""
_expect_exact(context, 'DELETE 1', timeout=2)
@then('we see table dropped')
def step_see_table_dropped(context):
"""
Wait to see drop output.
"""
_expect_exact(context, 'DROP TABLE', timeout=2)
@then('we see completions refresh started')
def step_see_refresh_started(context):
"""
Wait to see refresh output.
"""
_expect_exact(context, 'refresh started in the background', timeout=2)
def _expect_exact(context, expected, timeout):
try:
context.cli.expect_exact(expected, timeout=timeout)
except:
# Strip color codes out of the output.
actual = re.sub('\x1b\[[0-9;]*m', '', context.cli.before)
actual = re.sub('\x1b\[(.*)?.{1}', '', actual)
raise Exception('Expected:\n---\n{0}\n---\n\nActual:\n---\n{1}\n---'.format(
expected,
actual))
|
StarcoderdataPython
|
1636178
|
<gh_stars>0
#%% [markdown]
## Type conversion
2 + 3  # Addition
'2' + '3'  # Concatenation
# 2 + '3'
a = 2
b = '3'
print(type(a))
print(type(b))
# Converting a string to int
print(a + int(b))
# Converting the variable a (int type) to string. Result: concatenation
print(str(a) + b)
# Result: string (the result of the conversion)
type(str(a))
"""
# Error: characters of a string that are not digits
do not support conversion to int
"""
# Example
# print(2 + int('2 Victor'))
# Converting to float
print(2 + float('3.4'))
print(2 + float('3'))
|
StarcoderdataPython
|
11330466
|
<reponame>f-ilic/tdv<gh_stars>0
import torch
import torch.nn.functional
class TVL2Regularizer(torch.nn.Module): # l2
def __init__(self, *args, **kwargs):
super(TVL2Regularizer, self).__init__()
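        # Forward-difference kernels in x and y; stacked into self.K below they
        # form the discrete image gradient used by the TV energy.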
Kx = torch.Tensor([[0, 0, 0],
[0, -1, 1],
[0, 0, 0]])
Ky = torch.Tensor([[0, 0, 0],
[0, -1, 0],
[0, 1, 0]])
self.K = torch.stack([Kx, Ky]).unsqueeze(1)
def forward(self, x):
return self.grad(x)
def energy(self, x):
_,_,h,w = x.shape
padded = torch.nn.functional.pad(x, (1, 1, 1, 1), "constant", 0) # # zero pad
Kx = torch.nn.functional.conv2d(padded, self.K, padding=0)
E = (Kx**2).sum(dim=1,keepdim=True)
self.grad_E = Kx
return E
def grad(self, x):
E = self.energy(x)
cT = torch.nn.functional.conv_transpose2d(self.grad_E, self.K, padding=1)
return cT
def get_theta(self):
return self.named_parameters()
def get_vis(self):
raise NotImplementedError
class TVL1Regularizer(torch.nn.Module): # l2
# https://www.google.com/url?sa=i&url=https%3A%2F%2Fwww.researchgate.net%2Ffigure%2FProximal-operator-and-Moreaus-envelop-of-the-absolute-function-Proximal-operator-of_fig3_317013217&psig=AOvVaw0pTk02A6QTWSlBlN4BmZnp&ust=1627141244838000&source=images&cd=vfe&ved=0CAoQjRxqFwoTCMCvg9HD-fECFQAAAAAdAAAAABAJ
def __init__(self, *args, **kwargs):
super(TVL1Regularizer, self).__init__()
Kx = torch.Tensor([[0, 0, 0],
[0, -1, 1],
[0, 0, 0]])
Ky = torch.Tensor([[0, 0, 0],
[0, -1, 0],
[0, 1, 0]])
self.K = torch.stack([Kx, Ky]).cuda().unsqueeze(1)
def forward(self, x):
return self.grad(x)
def energy(self, x):
_,_,h,w = x.shape
padded = torch.nn.functional.pad(x, (1, 1, 1, 1), "constant", 0) # zero pad
Kx = torch.nn.functional.conv2d(padded, self.K, padding=0)
E = (Kx).abs().sum(dim=1, keepdim=True)
self.grad_E = torch.sign(Kx)*((Kx!=0).float())
return E
def grad(self, x):
E = self.energy(x) # already computes gradients
cT = torch.nn.functional.conv_transpose2d(self.grad_E, self.K, padding=1)
return cT
def get_theta(self):
return self.named_parameters()
def get_vis(self):
raise NotImplementedError
|
StarcoderdataPython
|
5089135
|
#!/usr/bin/env python
from __future__ import division
import sys
import json
import requests
from herepy.here_api import HEREApi
from herepy.utils import Utils
from herepy.error import HEREError
from herepy.models import RmeResponse
class RmeApi(HEREApi):
"""A python interface into the RME API"""
def __init__(self,
app_id=None,
app_code=None,
timeout=None):
"""Returns a RmeApi instance.
Args:
app_id (str):
App Id taken from HERE Developer Portal.
app_code (str):
App Code taken from HERE Developer Portal.
timeout (int):
Timeout limit for requests.
"""
super(RmeApi, self).__init__(app_id, app_code, timeout)
self._base_url = 'https://rme.api.here.com/2/matchroute.json'
def __get(self, data):
url = Utils.build_url(self._base_url, extra_params=data)
response = requests.get(url, timeout=self._timeout)
try:
json_data = json.loads(response.content.decode('utf8'))
if json_data.get('TracePoints') != None:
return RmeResponse.new_from_jsondict(json_data)
else:
return HEREError(json_data.get('Details', 'Error occured on function ' + sys._getframe(1).f_code.co_name))
except ValueError as err:
return HEREError('Error occured on function ' + sys._getframe(1).f_code.co_name + ' ' + str(err))
def match_route(self, gpx_file_content, route_mode='car', pde_layers=[]):
"""Retrieves misc information about the route given in gpx file
Args:
gpxfile content (str):
gpx file content as string
routemode (str):
route mode ('car')
pde_layers (str list):
PDE layers to retrieve e.g.:
ROAD_GEOM_FCn(TUNNEL)
SPEED_LIMITS_FCn(FROM_REF_SPEED_LIMIT,TO_REF_SPEED_LIMIT)
ADAS_ATTRIB_FCn(SLOPES)
or e.g.,
ROAD_GEOM_FCn(*)
SPEED_LIMITS_FCn(*)
Returns:
RmeResponse or HEREError instance"""
data = {'file': Utils.get_zipped_base64(gpx_file_content),
'route_mode': route_mode,
'attributes': ','.join(pde_layers),
'app_id': self._app_id,
'app_code': self._app_code}
return self.__get(data)
|
StarcoderdataPython
|
6469867
|
'''
Algorithm: 1. Put all elements into a hash table. 2. Iterate over the hash table; for each element, repeatedly increase (decrease) its value by one and check whether the result is in the hash table. If it is, remove it from the hash table.
func longestConsecutive (nums []int) int {
// write your code here
m := make(map[int]bool)
for _, v := range nums {
m[v] = true
}
res, down, up := 1, 0, 0
for k := range m {
for i := k + 1; m[i]; i++ {
up++
delete(m, i)
}
for i := k - 1; m[i]; i-- {
down++
delete(m, i)
}
delete(m, k)
if 1+up+down > res {
res = 1 + up + down
}
down, up = 0, 0 // important
}
return res
}
'''
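# A Python sketch of the same idea (not part of the original snippet): put all
# values in a set, then grow up and down from each value, removing visited
# numbers so every element is inspected only a constant number of times.
def longest_consecutive(nums):
    remaining = set(nums)
    best = 0
    while remaining:
        k = remaining.pop()
        up = down = 0
        while k + up + 1 in remaining:
            up += 1
            remaining.remove(k + up)
        while k - down - 1 in remaining:
            down += 1
            remaining.remove(k - down)
        best = max(best, 1 + up + down)
    return best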
|
StarcoderdataPython
|
366527
|
import nbformat as nbf
import sys
# Collect a list of all notebooks in the content folder
filenames = sys.argv[1:]
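# Usage sketch (the script and notebook names below are placeholders):
#   python strip_solutions.py content/notebook1.ipynb content/notebook2.ipynb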
text = '# Solution'
replacement = ''
# Search through each notebook
for filename in filenames:
ntbk = nbf.read(filename, nbf.NO_CONVERT)
for cell in ntbk.cells:
# remove tags
if 'tags' in cell['metadata']:
tags = cell['metadata']['tags']
cell['metadata']['tags'] = []
# Remove code from fill-in cells
#if 'fill-in' in tags:
# cell['source'] = ''
# remove output
if 'outputs' in cell:
cell['outputs'] = []
# remove solutions
if cell['source'].startswith(text):
cell['source'] = replacement
nbf.write(ntbk, filename)
|
StarcoderdataPython
|
399255
|
from file_1 import *
def target_func(x=func1, y=func2(), z=lambda: func3, w=lambda: func4()):
p1 = lambda: func5()
p2 = lambda: func6
p1(), p2()
def inner(ix=func7, iy=func8(), iz=lambda: func9, iw=lambda: func10()):
func11()
ip = lambda: func12()
ip()
func13()
inner(func14, func15(), lambda: func16, lambda: func17())
return func18
target_<caret>func()
|
StarcoderdataPython
|
1771366
|
import datetime
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
current = datetime.datetime.now()
renew = current.minute + 3
print(renew)
print(current)
while(True):
current = datetime.datetime.now()
if current.minute == renew:
print("Time to renew")
port = 465 # For SSL
smtp_server = "smtp.gmail.com"
senderEmail = "<EMAIL>"
receiverEmail = "<EMAIL>"
password = "<PASSWORD>*"
requestedDay = "13th June" #Will need to be taken from db or site etc
courseT = "forklift" #Will need to be taken from db or site etc
message = MIMEMultipart("alternative")
message["Subject"] = "Renewal Date Soon!"
message["From"] = senderEmail
message["To"] = receiverEmail
html = """\
<html>
<body>
<p>Hi,<br>
        Your certificate will need to be renewed at {} minutes past the hour for {}.
Please apply now to secure your spot.
</p>
</body>
</html>
""".format(renew, courseT)
part1 = MIMEText(html, "html")
message.attach(part1)
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(senderEmail, password)
server.sendmail(senderEmail, receiverEmail, message.as_string())
break
|
StarcoderdataPython
|
5132614
|
<reponame>Marzooq13579/Hack-Gadgets<gh_stars>100-1000
#!/usr/bin/env python
try:
from mechanize import Request, urlopen, URLError, HTTPError,ProxyHandler, build_opener, install_opener, Browser
except ImportError:
print "\n[X] Please install mechanize module:"
print " http://wwwsearch.sourceforge.net/mechanize/\n"
exit()
from collections import defaultdict
import random
import threading
from core.constants import USER_AGENTS
from core.target import Target
class Crawler(threading.Thread):
def __init__(self, engine, queue, crawl_links = False, crawl_forms = False):
threading.Thread.__init__(self)
self.engine = engine
self.queue = queue
self.results = []
self.errors = {}
self.crawl_links = crawl_links
self.crawl_forms = crawl_forms
self.browser = Browser()
self._setProxies()
self._setHeaders()
def _setHeaders(self):
if self.engine.getOption('ua') is not None:
if self.engine.getOption('ua') is "RANDOM":
self.browser.addheaders = [('User-Agent', random.choice(USER_AGENTS))]
else:
self.browser.addheaders = [('User-Agent', self.engine.getOption('ua'))]
if self.engine.getOption("cookie") is not None:
self.browser.addheaders = [("Cookie", self.engine.getOption("cookie"))]
def _setProxies(self):
if self.engine.getOption('http-proxy') is not None:
self.browser.set_proxies({'http': self.engine.getOption('http-proxy')})
def _addError(self, key, value):
if self.errors.has_key(key):
self.errors[key].append(value)
else:
self.errors[key] = [value]
def _crawlLinks(self, target):
# If UA is RANDOM we need to refresh browser's headers
if self.engine.getOption("ua") is "RANDOM": self._setHeaders()
try: self.browser.open(target.getAbsoluteUrl())
except HTTPError, e:
self._addError(e.code, target.getAbsoluteUrl())
return False
except URLError, e:
self._addError(e.reason, target.getAbsoluteUrl())
return False
except:
self._addError('Unknown', target.getAbsoluteUrl())
return False
else:
try:
links = self.browser.links()
except:
print "[X] Can't retrieve links"
return False
new_targets = []
for link in links:
if link.url.startswith(target.getBaseUrl()):
# Local Absolute url
new_targets.append(link.url)
continue
elif link.url.startswith("/"):
# Local Relative url, starting with /
link.url = target.getBaseUrl() + link.url
new_targets.append(link.url)
continue
elif link.url.startswith("http://") or link.url.startswith("www."):
# Absolute external links starting with http:// or www.
continue
else:
# Everything else, should only be local urls not starting with /
# If it's not the case they'll return 404 - i can live with that
link.url = target.getBaseUrl() + "/" + link.url
new_targets.append(link.url)
# Remove duplicate links
new_targets = set(new_targets)
#print "[-] Found %s unique URLs" % len(new_targets)
# Build new targets
for t in new_targets:
self.results.append(Target(t))
def _crawlForms(self, target):
# If UA is RANDOM we need to refresh browser's headers
if self.engine.getOption("ua") is "RANDOM": self._setHeaders()
try: self.browser.open(target.getAbsoluteUrl())
except HTTPError, e:
self._addError(e.code, target.getAbsoluteUrl())
return False
except URLError, e:
self._addError(e.reason, target.getAbsoluteUrl())
return False
except:
self._addError('Unknown', target.getAbsoluteUrl())
return False
else:
try:
forms = self.browser.forms()
except:
print "[X] Can't retrieve forms"
return False
for form in forms:
form_data = form.click_request_data()
if form.method is "POST" and form_data[1] is None:
# If the post form has no data to send
continue
elif form.method is "GET" and form_data[1] is not None:
# GET forms
nt = Target(form_data[0]+"?"+form_data[1], method = form.method, data = None)
self.results.append(nt)
else:
nt = Target(form_data[0], method = form.method, data = form_data[1])
self.results.append(nt)
def run(self):
while True:
try:
target = self.queue.get(timeout = 1)
except:
try:
self.queue.task_done()
except ValueError:
pass
else:
if self.crawl_links:
self._crawlLinks(target)
if self.crawl_forms:
self._crawlForms(target)
# task done
try:
self.queue.task_done()
except ValueError:
pass
|
StarcoderdataPython
|
1975196
|
<gh_stars>0
# +
import nbformat
import re
class assignment_rubric:
def __init__(self, file):
self.rubric_items = []
self.question_points = {}
self.subquestion_points = {}
self.get_rubric_items(file)
self.get_points()
def get_rubric_items(self, file):
nb = nbformat.read(file, as_version=nbformat.NO_CONVERT)
nb.cells
source = "\n".join([cell['source'] for cell in nb.cells if cell['cell_type'] == 'code' ])
#print(source)
pattern = r"### BEGIN GRADING(.*?)### END GRADING"
rubric_strings = "".join(re.findall(re.compile(pattern, re.DOTALL),source))
#print(rubric_strings)
rubric_lines = [s for s in rubric_strings.splitlines() if s.strip() != '']
self.rubric_items = []
for l in rubric_lines:
try:
r = {}
r['text'] = l.strip("# ").split(";")[0].rstrip()
r['points'] = int(l.split(";")[1])
self.rubric_items.append(r)
except IndexError:
print("Problem parsing this line:")
print(l)
raise IndexError
def get_points(self):
self.question_points = {}
self.subquestion_points = {}
for r in self.rubric_items:
question = r['text'][0]
subquestion = r['text'][0:2]
if question in self.question_points.keys():
self.question_points[question] += r['points']
else:
self.question_points[question] = r['points']
if subquestion in self.subquestion_points.keys():
self.subquestion_points[subquestion] += r['points']
else:
self.subquestion_points[subquestion] = r['points']
def print_overview(self, print_rubric = False):
total_points = 0
for k1 in self.question_points.keys():
print("Question %s: %d points" % (k1, self.question_points[k1]))
total_points += self.question_points[k1]
for k2 in [k for k in self.subquestion_points.keys() if k[0] == k1]:
print(" - %s: %d points" %
(k2, self.subquestion_points[k2]))
if print_rubric:
for r in self.rubric_items:
if r['text'][0:2] == k2:
print(" *", r['text'], "(%d)" % r['points'])
print()
print("Total points:", total_points)
def print_rubric(self):
for r in self.rubric_items:
print(r['text'])
print(r['points'])
# -
# file = "Test notebook.ipynb"
# file = "../../course_material_2020_2021/Assignment 2/TN2513 Assignment 2.ipynb"
# a = assignment_rubric(file)
# a.print_rubric()
|
StarcoderdataPython
|
3482764
|
from rest_framework.generics import ListCreateAPIView, RetrieveAPIView
from .serializers import RoomSerializer
from room.models import Room
class RoomListCreateAPIView(ListCreateAPIView):
queryset = Room.objects.all()
serializer_class = RoomSerializer
class RoomRetrieveAPIView(RetrieveAPIView):
queryset = Room.objects.all()
serializer_class = RoomSerializer
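# --- Illustrative sketch (not part of the original module) ------------------
# These generic views would typically be exposed in a urls.py along the lines of:
#
#   from django.urls import path
#   from .views import RoomListCreateAPIView, RoomRetrieveAPIView
#
#   urlpatterns = [
#       path('rooms/', RoomListCreateAPIView.as_view()),
#       path('rooms/<int:pk>/', RoomRetrieveAPIView.as_view()),
#   ]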
|
StarcoderdataPython
|
11253595
|
"""
Torch argmax policy
"""
import numpy as np
import railrl.torch.pytorch_util as ptu
from railrl.policies.base import SerializablePolicy
from railrl.torch.core import PyTorchModule
import torch
class ArgmaxDiscretePolicy(PyTorchModule, SerializablePolicy):
def __init__(self, qf):
self.save_init_params(locals())
super().__init__()
self.qf = qf
def get_action(self, ob_np, goal_np, tau_np):
ob_np = np.expand_dims(ob_np, axis=0)
goal_np = np.expand_dims(goal_np, axis=0)
tau_np = np.expand_dims(tau_np, axis=0).astype(np.float32)
# ob_np = ptu.from_numpy(ob_np).float()
# goal_np = ptu.from_numpy(goal_np).float()
# tau_np = ptu.from_numpy(tau_np).float()
# q_values = [self.qf(observations=ob_np, actions=ptu.from_numpy(np.array([[a]])).float(),
# goals = goal_np, num_steps_left=tau_np) for a in range(5)]
# q_values_np = ptu.get_numpy(q_values)
q_values_np = np.array([self.qf.eval_np(observations=ob_np, actions=np.array([[a]]), goals=goal_np, num_steps_left=tau_np) for a in range(5)])
return q_values_np.argmax(), {}
|
StarcoderdataPython
|
3342183
|
<reponame>zignig/cqparts_bucket<filename>plank.py
" A plank for mounting stuff on "
import cadquery as cq
import cqparts
from cqparts.params import *
from cqparts.constraint import Fixed, Coincident
from cqparts_misc.basic.primatives import Box
from cqparts.display import render_props
from .manufacture import Lasercut
class Plank(Box, Lasercut):
length = PositiveFloat(90)
width = PositiveFloat(90)
thickness = PositiveFloat(3)
_render = render_props(template="wood")
fillet = PositiveFloat(3)
def make(self):
pl = cq.Workplane("XY").box(self.length, self.width, self.thickness)
pl = pl.translate((0, 0, self.thickness / 2))
if self.fillet > 0:
pl = pl.edges("|Z").fillet(self.fillet)
return pl
if __name__ == "__main__":
from cqparts.display import display
display(Plank())
|
StarcoderdataPython
|
5117742
|
from compas.geometry import Frame
from compas.robots import LocalPackageMeshLoader
import compas_fab
from compas_fab.backends.kinematics import AnalyticalInverseKinematics
from compas_fab.backends import PyBulletClient
urdf_filename = compas_fab.get('universal_robot/ur_description/urdf/ur5.urdf')
srdf_filename = compas_fab.get('universal_robot/ur5_moveit_config/config/ur5.srdf')
frame_WCF = Frame((0.381, 0.093, 0.382), (0.371, -0.292, -0.882), (0.113, 0.956, -0.269))
with PyBulletClient(connection_type='direct') as client:
# Load UR5
loader = LocalPackageMeshLoader(compas_fab.get('universal_robot'), 'ur_description')
robot = client.load_robot(urdf_filename, [loader])
client.load_semantics(robot, srdf_filename)
ik = AnalyticalInverseKinematics(client)
# set a new IK function
client.inverse_kinematics = ik.inverse_kinematics
options = {"solver": "ur5", "check_collision": True, "keep_order": True}
for solution in robot.iter_inverse_kinematics(frame_WCF, options=options):
print(solution)
|
StarcoderdataPython
|
23385
|
import spidev
columns = [0x1,0x2,0x3,0x4,0x5,0x6,0x7,0x8]
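# Each pattern below holds one byte per MAX7219 digit register (one row of the
# 8x8 matrix); every set bit lights a single LED in that row.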
LEDOn = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
LEDOff = [0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]
LEDEmoteSmile = [0x0,0x0,0x24,0x0,0x42,0x3C,0x0,0x0]
LEDEmoteSad = [0x0,0x0,0x24,0x0,0x0,0x3C,0x42,0x0]
LEDEmoteTongue = [0x0,0x0,0x24,0x0,0x42,0x3C,0xC,0x0]
LEDEmoteSurprise = [0x0,0x0,0x24,0x0,0x18,0x24,0x24,0x18]
spi = None
def setup(robot_config):
global LEDEmoteSmile
global LEDEmoteSad
global LEDEmoteTongue
    global LEDEmoteSurprise
global module
global spi
#LED controlling
spi = spidev.SpiDev()
spi.open(0,0)
#VCC -> RPi Pin 2
#GND -> RPi Pin 6
#DIN -> RPi Pin 19
#CLK -> RPi Pin 23
#CS -> RPi Pin 24
    # Decode mode: off (no BCD decoding)
spi.writebytes([0x09])
spi.writebytes([0x00])
# Start with low brightness
spi.writebytes([0x0a])
spi.writebytes([0x03])
# scanlimit; 8 LEDs
spi.writebytes([0x0b])
spi.writebytes([0x07])
# Enter normal power-mode
spi.writebytes([0x0c])
spi.writebytes([0x01])
# Activate display
spi.writebytes([0x0f])
spi.writebytes([0x00])
rotate = robot_config.getint('max7219', 'ledrotate')
if rotate == 180:
LEDEmoteSmile = LEDEmoteSmile[::-1]
LEDEmoteSad = LEDEmoteSad[::-1]
LEDEmoteTongue = LEDEmoteTongue[::-1]
LEDEmoteSurprise = LEDEmoteSurprise[::-1]
SetLED_Off()
def SetLED_On():
for i in range(len(columns)):
spi.xfer([columns[i],LEDOn[i]])
def SetLED_Off():
for i in range(len(columns)):
spi.xfer([columns[i],LEDOff[i]])
def SetLED_E_Smiley():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteSmile[i]])
def SetLED_E_Sad():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteSad[i]])
def SetLED_E_Tongue():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteTongue[i]])
def SetLED_E_Surprised():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteSurprise[i]])
def SetLED_Low():
# brightness MIN
spi.writebytes([0x0a])
spi.writebytes([0x00])
def SetLED_Med():
#brightness MED
spi.writebytes([0x0a])
spi.writebytes([0x06])
def SetLED_Full():
# brightness MAX
spi.writebytes([0x0a])
spi.writebytes([0x0F])
def move(args):
command = args['command']
if command == 'LED_OFF':
SetLED_Off()
if command == 'LED_FULL':
SetLED_On()
SetLED_Full()
if command == 'LED_MED':
SetLED_On()
SetLED_Med()
if command == 'LED_LOW':
SetLED_On()
SetLED_Low()
if command == 'LED_E_SMILEY':
SetLED_On()
SetLED_E_Smiley()
if command == 'LED_E_SAD':
SetLED_On()
SetLED_E_Sad()
if command == 'LED_E_TONGUE':
SetLED_On()
SetLED_E_Tongue()
if command == 'LED_E_SURPRISED':
SetLED_On()
        SetLED_E_Surprised()
|
StarcoderdataPython
|
1745983
|
import unittest
from unittest import mock
from dataprofiler.labelers import base_model
class TestBaseModel(unittest.TestCase):
@mock.patch('dataprofiler.labelers.base_model.BaseModel.'
'_BaseModel__subclasses',
new_callable=mock.PropertyMock)
def test_register_subclass(self, mock_subclasses):
# remove not implemented func
fake_class = type('FakeModel', (base_model.BaseModel,), {})
fake_class.__abstractmethods__ = []
fake_class._register_subclass()
# base_model.BaseModel._register_subclass()
self.assertIn(
mock.call().__setitem__('fakemodel', fake_class),
mock_subclasses.mock_calls)
@mock.patch('dataprofiler.labelers.base_model.BaseModel.'
'__abstractmethods__', set())
@mock.patch('dataprofiler.labelers.base_model.BaseModel.'
'_validate_parameters', return_value=None)
def test_equality_checks(self, *mocks):
FakeModel1 = type('FakeModel1', (base_model.BaseModel,), {})
FakeModel2 = type('FakeModel2', (base_model.BaseModel,), {})
fake_model1 = FakeModel1(label_mapping={'a': 1, 'b': 2},
parameters={'test': 1})
fake_model1_1 = FakeModel1(label_mapping={'a': 1, 'b': 2},
parameters={'test': 1})
fake_model1_2 = FakeModel1(label_mapping={'c': 2},
parameters={'test': 1})
fake_model1_3 = FakeModel1(label_mapping={'a': 1, 'b': 2},
parameters={'Different': 1})
fake_model2 = FakeModel2(label_mapping={'a': 1, 'b': 2},
parameters={'a': 1, 'b': 2})
# assert True if the same object
self.assertEqual(fake_model1, fake_model1)
# assert True if same class but same params / label_mapping
self.assertEqual(fake_model1, fake_model1_1)
# assert False if diff class even if same params / label_mapping
self.assertNotEqual(fake_model1, fake_model2)
# assert False if same class even diff params / label_mapping
self.assertNotEqual(fake_model1, fake_model1_2)
self.assertNotEqual(fake_model1, fake_model1_3)
# @mock.patch('data_profiler.labelers.base_model.BaseModel._validate_parameters')
def test_get_parameters(self):
mock_model = mock.Mock(spec=base_model.BaseModel)
mock_model._label_mapping = {'a': 1, 'c': '2'}
mock_model._parameters = {'test1': 1, 'test2': '2'}
# base case
params = base_model.BaseModel.get_parameters(mock_model)
self.assertDictEqual(
{'label_mapping': {'a': 1, 'c': '2'}, 'test1': 1, 'test2': '2'},
params)
# param list w/o label_mapping
param_list = ['test1']
params = base_model.BaseModel.get_parameters(mock_model, param_list)
self.assertDictEqual({'test1': 1}, params)
# param list w/ label_mapping
param_list = ['test2', 'label_mapping']
params = base_model.BaseModel.get_parameters(mock_model, param_list)
self.assertDictEqual(
{'label_mapping': {'a': 1, 'c': '2'}, 'test2': '2'}, params)
def test_set_parameters(self):
# validate params set successfully
mock_model = mock.Mock(spec=base_model.BaseModel)
mock_model._parameters = dict()
params = {'test': 1}
base_model.BaseModel.set_params(mock_model, **params)
self.assertDictEqual(params, mock_model._parameters)
# test overwrite params
params = {'test': 2}
base_model.BaseModel.set_params(mock_model, **params)
self.assertDictEqual(params, mock_model._parameters)
# test invalid params
mock_model._validate_parameters.side_effect = ValueError('test')
with self.assertRaisesRegex(ValueError, 'test'):
base_model.BaseModel.set_params(mock_model, **params)
@mock.patch.multiple('dataprofiler.labelers.base_model.BaseModel',
__abstractmethods__=set(),
_validate_parameters=mock.MagicMock(return_value=None))
def test_add_labels(self, *args):
# setup model with mocked abstract methods
mock_model = base_model.BaseModel(
label_mapping={'NEW_LABEL': 1}, parameters={})
# assert bad label inputs
with self.assertRaisesRegex(TypeError, '`label` must be a str.'):
mock_model.add_label(label=None)
with self.assertRaisesRegex(TypeError, '`label` must be a str.'):
mock_model.add_label(label=1)
# assert existing label
label = 'NEW_LABEL'
with self.assertWarnsRegex(UserWarning,
'The label, `{}`, already exists in the '
'label mapping.'.format(label)):
mock_model.add_label(label)
# assert bad same_as input
with self.assertRaisesRegex(TypeError, '`same_as` must be a str.'):
mock_model.add_label(label='test', same_as=1)
label = 'NEW_LABEL_2'
same_as = 'DOES_NOT_EXIST'
with self.assertRaisesRegex(ValueError,
'`same_as` value: {}, did not exist in the '
'label_mapping.'.format(same_as)):
mock_model.add_label(label, same_as)
# assert successful add
label = 'NEW_LABEL_2'
mock_model.add_label(label)
self.assertDictEqual(
{'NEW_LABEL': 1, 'NEW_LABEL_2': 2}, mock_model._label_mapping)
# assert successful add w/ same_as
label = 'NEW_LABEL_3'
mock_model.add_label(label, same_as='NEW_LABEL')
self.assertDictEqual(
{'NEW_LABEL': 1, 'NEW_LABEL_2': 2, 'NEW_LABEL_3': 1},
mock_model._label_mapping)
def test_set_label_mapping_parameters(self):
# setup mock
mock_model = mock.Mock(spec=base_model.BaseModel)
mock_model._convert_labels_to_label_mapping.side_effect = \
base_model.BaseModel._convert_labels_to_label_mapping
# assert non value is not accepted.
with self.assertRaisesRegex(TypeError,
"Labels must either be a non-empty encoding"
" dict which maps labels to index encodings"
" or a list."):
base_model.BaseModel.set_label_mapping(
mock_model, label_mapping=None)
# non-acceptable value case
with self.assertRaisesRegex(TypeError,
"Labels must either be a non-empty encoding"
" dict which maps labels to index encodings"
" or a list."):
base_model.BaseModel.set_label_mapping(
mock_model, label_mapping=1)
# assert error for empty label_mapping dict
with self.assertRaisesRegex(TypeError,
"Labels must either be a non-empty encoding"
" dict which maps labels to index encodings"
" or a list."):
base_model.BaseModel.set_label_mapping(mock_model, label_mapping={})
# assert label_map set
base_model.BaseModel.set_label_mapping(
mock_model, label_mapping={'test': 'test'})
self.assertDictEqual({'test': 'test'}, mock_model._label_mapping)
def test_convert_labels_to_encodings(self, *mocks):
# test label list to label_mapping
labels = ['a', 'b', 'd', 'c']
label_mapping = base_model.BaseModel._convert_labels_to_label_mapping(
labels, requires_zero_mapping=True)
self.assertDictEqual(dict(a=0, b=1, d=2, c=3), label_mapping)
# test label dict to label_mapping
labels = dict(a=1, b=2, d=3, c=4)
label_mapping = base_model.BaseModel._convert_labels_to_label_mapping(
labels, requires_zero_mapping=True)
self.assertDictEqual(dict(a=1, b=2, d=3, c=4), label_mapping)
|
StarcoderdataPython
|
3404617
|
from __future__ import unicode_literals
from django.urls import reverse_lazy
from django.views.generic import ListView
from portfolio.views import BaseSudoView
from portfolio.skills.models import Skills
from portfolio.categories.models import Category
from portfolio.skills.forms import SkillsForm
class SkillsFormsView(BaseSudoView):
model = Skills
form_class = SkillsForm
template_name = 'skill_add.html'
success_url = reverse_lazy('portfolio:skill:skill_index')
class SkillsView(ListView):
model = Skills
template_name = 'skill_index.html'
def get_queryset(self):
return self.model.objects.filter()
def get_context_data(self, **kwargs):
context = super(SkillsView, self).get_context_data(**kwargs)
query_set = Category.objects.all()
context['categories'] = query_set
return context
|
StarcoderdataPython
|
3284484
|
"""
Basic wheel tests.
"""
import os
import pkg_resources
import json
import sys
from pkg_resources import resource_filename
import wheel.util
import wheel.tool
from wheel import egg2wheel
from wheel.install import WheelFile
from zipfile import ZipFile
from shutil import rmtree
test_distributions = ("complex-dist", "simple.dist", "headers.dist")
def teardown_module():
"""Delete eggs/wheels created by tests."""
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
for subdir in ('build', 'dist'):
try:
rmtree(os.path.join(base, dist, subdir))
except OSError:
pass
def setup_module():
build_wheel()
build_egg()
def build_wheel():
"""Build wheels from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_wheel']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def build_egg():
"""Build eggs from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_egg']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def test_findable():
"""Make sure pkg_resources can find us."""
assert pkg_resources.working_set.by_key['wheel'].version
def test_egg_re():
"""Make sure egg_info_re matches."""
egg_names = open(pkg_resources.resource_filename('wheel', 'eggnames.txt'))
for line in egg_names:
line = line.strip()
if not line:
continue
assert egg2wheel.egg_info_re.match(line), line
def test_compatibility_tags():
"""Test compatibilty tags are working."""
wf = WheelFile("package-1.0.0-cp32.cp33-noabi-noarch.whl")
assert (list(wf.compatibility_tags) ==
[('cp32', 'noabi', 'noarch'), ('cp33', 'noabi', 'noarch')])
assert (wf.arity == 2)
wf2 = WheelFile("package-1.0.0-1st-cp33-noabi-noarch.whl")
wf2_info = wf2.parsed_filename.groupdict()
assert wf2_info['build'] == '1st', wf2_info
def test_convert_egg():
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
distdir = os.path.join(base, dist, 'dist')
eggs = [e for e in os.listdir(distdir) if e.endswith('.egg')]
wheel.tool.convert(eggs, distdir, verbose=False)
def test_unpack():
"""
Make sure 'wheel unpack' works.
This also verifies the integrity of our testing wheel files.
"""
for dist in test_distributions:
distdir = pkg_resources.resource_filename('wheel.test',
os.path.join(dist, 'dist'))
for wheelfile in (w for w in os.listdir(distdir) if w.endswith('.whl')):
wheel.tool.unpack(os.path.join(distdir, wheelfile), distdir)
def test_no_scripts():
"""Make sure entry point scripts are not generated."""
dist = "complex-dist"
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
assert not '.data/scripts/' in entry.filename
def test_pydist():
"""Make sure pydist.json exists and validates against our schema."""
# XXX this test may need manual cleanup of older wheels
import jsonschema
def open_json(filename):
return json.loads(open(filename, 'rb').read().decode('utf-8'))
pymeta_schema = open_json(resource_filename('wheel.test',
'pydist-schema.json'))
valid = 0
for dist in ("simple.dist", "complex-dist"):
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
if entry.filename.endswith('/metadata.json'):
pymeta = json.loads(whl.read(entry).decode('utf-8'))
jsonschema.validate(pymeta, pymeta_schema)
valid += 1
assert valid > 0, "No metadata.json found"
def test_util():
"""Test functions in util.py."""
for i in range(10):
before = b'*' * i
encoded = wheel.util.urlsafe_b64encode(before)
assert not encoded.endswith(b'=')
after = wheel.util.urlsafe_b64decode(encoded)
assert before == after
def test_pick_best():
"""Test the wheel ranking algorithm."""
def get_tags(res):
info = res[-1].parsed_filename.groupdict()
return info['pyver'], info['abi'], info['plat']
cand_tags = [('py27', 'noabi', 'noarch'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'),
('cp26', 'noabi', 'linux_i686'),
('cp27', 'noabi', 'linux_x86_64'),
('cp26', 'noabi', 'linux_x86_64')]
cand_wheels = [WheelFile('testpkg-1.0-%s-%s-%s.whl' % t)
for t in cand_tags]
supported = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
supported2 = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch'),
('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch')]
supported3 = [('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
for supp in (supported, supported2, supported3):
context = lambda: list(supp)
for wheel in cand_wheels:
wheel.context = context
best = max(cand_wheels)
assert list(best.tags)[0] == supp[0]
# assert_equal(
# list(map(get_tags, pick_best(cand_wheels, supp, top=False))), supp)
|
StarcoderdataPython
|
332666
|
<filename>BreakTheName.py<gh_stars>0
#cerner_2tothe5th_2021
# Get all substrings of a given string using slicing of string
# initialize the string
input_string = "floccinaucinihilipilification"
# print the input string
print("The input string is : " + str(input_string))
# Get all substrings of the string
result = [input_string[i: j] for i in range(len(input_string))
for j in range(i + 1, len(input_string) + 1)]
# print the result
print("All substrings of the input string are : " + str(result))
|
StarcoderdataPython
|
11233988
|
#%%
import pandas as pd
import numpy as np
import holoviews as hv
import hvplot.pandas
from scipy.sparse.linalg import svds
from scipy.stats import chisquare, chi2_contingency
from sklearn.decomposition import TruncatedSVD
from umoja.ca import CA
hv.extension('bokeh')
#%%
X = context.io.load('xente_train')
Y = context.io.load('xente_sample_submission')
Y_wide = (Y
.loc[:, 'Account X date X PID']
.str.split(' X ', expand=True)
.rename(columns={0:'acc', 1:'date',2:'PID'})
.assign(test = True)
)
context.io.save('xente_sample_submission_wide', Y_wide)
data = (pd.concat([Y_wide, X.assign(test = False)], axis=0)
.reset_index()
.rename(columns={'index':'old_index'})
.assign(acc = lambda df: df.acc.astype('int64')))
context.io.save('xente_merged', data)
# %%
a = (data.drop(columns=['old_index'])
.groupby(['acc', 'PID'])
.date
.apply(lambda df: pd.to_datetime(df).sort_values().diff().dt.total_seconds()
.fillna(0))
.reset_index()
.rename(columns={'level_2': 'index'})
.set_index('index')
.rename(columns={'date': 'time_since_last'}))
b = data.loc[:,['date']].assign(date = lambda df: pd.to_datetime(df.date))
#%%
time_since_last = (a.join(b).sort_values('date'))
#%%
dummies = pd.get_dummies(time_since_last.PID).astype(np.int)
dummies_nan_zero = dummies.replace(0, np.nan)
dummies_nan_neg = dummies.replace(0, -1)
dummies_nan_one = dummies.replace({0:1, 1:0})
dummies_one_nan = dummies.replace({1: np.nan, 0: 1})
# %%
time_since_last_ffill = dummies_nan_zero.multiply(time_since_last.time_since_last, axis=0).ffill()
seconds_since_last_any = time_since_last.date.diff().dt.total_seconds()
time_diff = time_since_last_ffill.add(dummies_nan_one.multiply(seconds_since_last_any, axis=0))
time_diff_censor = time_diff.multiply(dummies_nan_neg)
target = time_diff_censor.fillna(time_diff_censor.min())
# %%
sensor_pred = target.multiply(dummies_one_nan).abs()
features = sensor_pred.fillna(sensor_pred.mean(0))
# %%
context.io.save('xente_features', features)
context.io.save('xente_target', target)
# %%
|
StarcoderdataPython
|
6426877
|
<filename>utils/emulation_host_meta_generator.py
import StringIO
import sys
import os
path = os.path.realpath(__file__)
sys.path.insert(0, '%s/..' % os.path.dirname(os.path.abspath(__file__)))
from ixnetwork.IxnHttp import IxnHttp
def process_node(metadata, class_name):
# build find
expected_states = []
attr_doc_string = '%s"""Find specific %s emulated host sessions using a virtual port name or a parent emulation host and optional filters.\n' % (' ' * 8, metadata.custom.name)
attr_doc_string += '%sFilter values can be a single value or a list of values, see the specific **filter information below.\n\n' % (' ' * 12)
attr_doc_string += '%svport_name: string (The topology that contains the vport with this name will be used as the starting point of the search)\n' % (' ' * 12)
attr_doc_string += '%semulation_host: IxnEmulationHost (A parent emulation host that will be used as the starting point of the search)\n' % (' ' * 12)
attr_doc_string += '%s**filters:\n' % (' ' * 12)
for attribute in metadata.custom.attributes:
if attribute.deprecated is False:
if attribute.type.name == 'object':
continue
if attribute.type.name == 'list' and attribute.type.innerType.name == 'enum':
expected_states = attribute.type.innerType.enums
continue
attr_doc_string += '%s%s: %s' % (' ' * 16, attribute.name, attribute.type.name)
attr_doc_string += '\n'
attr_doc_string += '%s"""\n' % (' ' * 8)
fid = StringIO.StringIO()
# write class and __init__
fid.write('class %s(IxnEmulationHost):\n' % (class_name))
fid.write('%s"""Generated NGPF %s emulation host """\n' % (' ' * 4, metadata.custom.name))
fid.write('\n')
for expected_state in expected_states:
fid.write('%sSTATE_%s = \'%s\'\n' % (' ' * 4, expected_state.upper(), expected_state))
fid.write('\n')
fid.write('%sdef __init__(self, ixnhttp):\n' % (' ' * 4))
fid.write('%ssuper(%s, self).__init__(ixnhttp)\n' % (' ' * 8, class_name))
fid.write('\n')
# write find
class_names = metadata.custom.path.split('/')[1:]
class_names = ','.join('"{0}"'.format(class_name) for class_name in class_names)
fid.write('%sdef find(self, vport_name=None, emulation_host=None, **filters):\n' % (' ' * 4))
fid.write(attr_doc_string)
fid.write('%sreturn super(%s, self).find([%s], vport_name, emulation_host, filters)\n' % (' ' * 8, class_name, class_names))
fid.write('\n')
# write operations
if len(expected_states) > 0:
for operation in metadata.custom.operations:
if len(operation.args) == 2 and operation.args[1].type.name == 'string':
fid.write('%sdef %s(self, expected_state=None, timeout=None):\n' % (' ' * 4, operation.operation.lower()))
fid.write('%s"""%s\n' % (' ' * 8, ' '.join(operation.description.split())))
fid.write('%sFor expected_state options see the class state variables\n' % (' ' * 12))
fid.write('%s"""\n' % (' ' * 8))
fid.write('%ssuper(%s, self).call_operation(\'%s\', expected_state, timeout)\n' % (' ' * 8, class_name, operation.operation))
fid.write('\n')
fid.write('\n')
classes[metadata.custom.name] = fid.getvalue()
fid.close()
print('GENERATED: %s' % class_name)
def process_class(path, name):
if name in classes.keys():
return
if name in ['connector', 'item', 'port', 'tlvProfile', 'learnedInfo', 'genericProtocol','packetInList']:
return
try:
metadata = ixnhttp.help(path)
class_name = 'Ixn%s%sEmulation' % (metadata.custom.name[0].upper(), metadata.custom.name[1:])
process_node(metadata, class_name)
for child in metadata.custom.children:
process_class(child.path, child.name)
except:
print('skipping %s' % name)
ixnhttp = IxnHttp('10.200.22.48', 12345)
sessions = ixnhttp.sessions()
ixnhttp.current_session = sessions[0]
classes = {}
process_class('/topology', 'topology')
filename = '%s/../ixnetwork/IxnEmulationHosts.py' % os.path.dirname(os.path.realpath(__file__))
with open(filename, 'w') as fid:
fid.write('from ixnetwork.IxnEmulationHost import IxnEmulationHost\n\n')
keys = classes.keys()
keys.sort()
for key in keys:
fid.write(classes[key])
fid.flush()
|
StarcoderdataPython
|
9720702
|
<gh_stars>0
import os
import sys
import time
import numpy as np
import torch
import torch.distributed as dist
import torch.utils.collect_env
from contextlib import contextmanager
from torch.nn.parallel import DistributedDataParallel
class DDP(DistributedDataParallel):
# Distributed wrapper. Supports asynchronous evaluation and model saving
def forward(self, *args, **kwargs):
# DDP has a sync point on forward. No need to do this for eval. This allows us to have different batch sizes
if self.training: return super().forward(*args, **kwargs)
else: return self.module(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
self.module.load_state_dict(*args, **kwargs)
def syn_buffer_params(self):
self._sync_params_and_buffers()
def state_dict(self, *args, **kwargs):
return self.module.state_dict(*args, **kwargs)
def get_rank():
"""
get distributed rank
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank=torch.distributed.get_rank()
else:
rank=0
return rank
def get_world_size():
"""
get total number of distributed workers
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
world_size=torch.distributed.get_world_size()
else:
world_size=1
return world_size
def init_distributed(cuda):
"""
    Initialize the distributed backend.
    cuda: bool, True to initialize the nccl backend
"""
world_size=int(os.environ.get('WORLD_SIZE',1))
distributed=(world_size>1)
if distributed:
backend='nccl' if cuda else 'gloo'
dist.init_process_group(backend=backend,init_method='env://')
assert dist.is_initialized()
return distributed
def set_device(cuda, local_rank):
"""
Sets device based on local_rank and returns instance of torch.device.
:param cuda: if True: use cuda
:param local_rank: local rank of the worker
"""
if cuda:
torch.cuda.set_device(local_rank)
device = torch.device('cuda')
else:
device = torch.device('cpu')
return device
def barrier():
"""
    Call torch.distributed.barrier() if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.barrier()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
class AverageMeter:
"""
Computes and stores the average and current value
"""
def __init__(self, warmup=0, keep=False):
self.reset()
self.warmup = warmup
self.keep = keep
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.iters = 0
self.vals = []
def update(self, val, n=1):
self.iters += 1
self.val = val
if self.iters > self.warmup:
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if self.keep:
self.vals.append(val)
def reduce(self, op):
"""
Reduces average value over all workers.
:param op: 'sum' or 'mean', reduction operator
"""
if op not in ('sum', 'mean'):
raise NotImplementedError
distributed = (get_world_size() > 1)
print('all reduce start',distributed)
if distributed:
backend = dist.get_backend()
print('backend is {}'.format(backend))
            cuda = (backend == 'nccl')  # i.e. dist.Backend.NCCL
print(cuda)
if cuda:
avg = torch.cuda.FloatTensor([self.avg])
_sum = torch.cuda.FloatTensor([self.sum])
else:
avg = torch.FloatTensor([self.avg])
_sum = torch.FloatTensor([self.sum])
print(_sum)
print(avg)
dist.all_reduce(avg)
dist.all_reduce(_sum)
print(avg.item(),_sum.item())
self.avg = avg.item()
self.sum = _sum.item()
if op == 'mean':
self.avg /= get_world_size()
self.sum /= get_world_size()
        print('all_reduce done')
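# A minimal usage sketch added for illustration (not part of the original module): track a
# running loss with the AverageMeter defined above. The loss values are made up.
def example_average_meter():
    meter = AverageMeter(warmup=1)    # the first update is ignored by the running statistics
    for loss in [0.9, 0.7, 0.5, 0.3]:
        meter.update(loss, n=1)
    return meter.avg, meter.count     # 0.5 and 3 for the values above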
|
StarcoderdataPython
|
4947784
|
<reponame>idigbio-citsci-hackathon/CsvToolbox<filename>src/anonymizer.py
#!/usr/bin/env python
# Given a CSV file, generates another CSV anonymizing the column specified
# as usernames while maintaining the other columns (quotes around a value
# may vary from the original file). The column ID specification is 0-based.
import argparse
import csv
import itertools
import time
"""
Class for anonymizing data from a CSV file (assumed to have a single
header line)
"""
class Anonymizer:
def __init__(self, inputFile, outputFile):
# Input CSV file
self.inputFile = inputFile
# Output CSV file
self.outputFile = outputFile
# Array with column IDs to check (ID is 0-based)
self.checkColumns = None
# Value used to indicate that no user is attached to an entry
self.noUser = "not-logged-in"
print "Input:",self.inputFile
print "Output:",self.outputFile
def parseColRange(self, rangeStr):
result = set()
for part in rangeStr.split(','):
x = part.split('-')
result.update(range(int(x[0]), int(x[-1])+1))
self.checkColumns = sorted(result)
if len(self.checkColumns) == 0:
raise Exception("Must provide column IDs that are required to have user data!")
def anonymize(self):
# Set up input file reader
csvInputFile = open(self.inputFile, 'rb')
csvInputFileReader = csv.reader(csvInputFile, dialect='excel')
# Set up output files
csvOutputFile = open(self.outputFile, 'wb')
csvOutputWriter = csv.writer(csvOutputFile, dialect='excel')
# Example for forcing double quotes on all fields, and using UNIX-style new line
# csvOutputWriter = csv.writer(csvOutputFile, dialect='excel', quoting=csv.QUOTE_ALL, lineterminator='\n')
# Read the header line
header = csvInputFileReader.next()
# Retrieve all data columns as rows (cols[0] contains column 0)
cols = zip(*csvInputFileReader)
# For each column to be anonymized, sorted and filter duplicates
for i in self.checkColumns:
uniqVals = sorted(set(cols[i]))
uniqVals.remove(self.noUser)
cleanCol = []
# Replace value in column with anonymous ID
for j,item in enumerate(cols[i]):
try:
uid = uniqVals.index(item)
cleanCol.append('user' + str(uid))
except ValueError:
cleanCol.append(self.noUser)
cols[i] = cleanCol
# Write header back
csvOutputWriter.writerow(header)
# Write sorted/filtered columns after transposing the rows back to columns
# Since each column may have different length, use the length of the
# longest column and fill other cells with empty string
csvOutputWriter.writerows(itertools.izip_longest(*cols, fillvalue=''))
csvInputFile.close()
csvOutputFile.close()
"""
Parses input arguments, constructs a Anonymizer to remove user identity
from a CSV file with crowdsourced information.
Example execution: python anonymizer.py -i ../data/CrowdsourcedData.csv
-o ../data/AnonymizedDataset.csv -c 6
"""
def main():
# Parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument("-i", required=True, help="Input csv file to anonymize")
parser.add_argument("-c", required=True, help="Comma separated list of \
columns IDs that are to be anonymized, ID is 0-based")
parser.add_argument("-o", "--output", required=True, help="Output csv file")
args = parser.parse_args()
# Instantiate and configure work csv anonymizer
startTime = time.time()
anon = Anonymizer(args.i, args.output)
anon.parseColRange(args.c)
anon.anonymize()
print "Done in ", time.time() - startTime, " secs"
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4885299
|
"""initial
Revision ID: 5a2877e096ed
Revises:
Create Date: 2022-03-20 21:22:58.439923
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "5a2877e096ed"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute('CREATE EXTENSION IF NOT EXISTS "uuid-ossp";')
op.create_table(
"hosts",
sa.Column(
"guid",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("full_name", sa.String(length=256), nullable=False),
sa.Column("email", sa.String(length=100), nullable=False),
sa.Column("phone_number", sa.String(length=20), nullable=False),
sa.Column("call_after", sa.String(length=64), nullable=True),
sa.Column("call_before", sa.String(length=64), nullable=True),
sa.Column("comments", sa.Text(), nullable=True),
sa.Column(
"status",
sa.Enum("CREATED", "VERIFIED", "REJECTED", name="verificationstatus"),
server_default="CREATED",
nullable=False,
),
sa.Column(
"created_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column("system_comments", sa.Text(), nullable=True),
sa.PrimaryKeyConstraint("guid"),
)
op.create_table(
"languages",
sa.Column("name", sa.String(length=20), nullable=True),
sa.Column("code2", sa.String(length=2), nullable=False),
sa.Column("code3", sa.String(length=3), nullable=True),
sa.PrimaryKeyConstraint("code2"),
)
op.create_table(
"teammembers",
sa.Column(
"guid",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("full_name", sa.String(length=100), nullable=True),
sa.Column("phone_number", sa.String(length=20), nullable=True),
sa.PrimaryKeyConstraint("guid"),
)
op.create_table(
"accommodation_units",
sa.Column(
"guid",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("host_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("city", sa.String(length=50), nullable=True),
sa.Column("zip", sa.String(length=10), nullable=False),
sa.Column(
"voivodeship",
sa.Enum(
"DOLNOSLASKIE",
"KUJAWSKOPOMORSKIE",
"LUBELSKIE",
"LUBUSKIE",
"LODZKIE",
"MALOPOLSKIE",
"MAZOWIECKIE",
"OPOLSKIE",
"PODKARPACKIE",
"PODLASKIE",
"POMORSKIE",
"SLASKIE",
"SWIETOKRZYSKIE",
"WARMINSKOMAZURSKIE",
"WIELKOPOLSKIE",
"ZACHODNIOPOMORSKIE",
name="voivodeship",
),
nullable=True,
),
sa.Column("address_line", sa.String(length=512), nullable=False),
sa.Column("vacancies_total", sa.Integer(), nullable=False),
sa.Column("pets_present", sa.Boolean(), nullable=True),
sa.Column("pets_accepted", sa.Boolean(), nullable=True),
sa.Column("disabled_people_friendly", sa.Boolean(), nullable=True),
sa.Column("lgbt_friendly", sa.Boolean(), nullable=True),
sa.Column("parking_place_available", sa.Boolean(), nullable=True),
sa.Column("owner_comments", sa.Text(), nullable=True),
sa.Column("easy_ambulance_access", sa.Boolean(), nullable=True),
sa.Column("vacancies_free", sa.Integer(), nullable=True),
sa.Column("staff_comments", sa.Text(), nullable=True),
sa.Column(
"status",
sa.Enum("CREATED", "VERIFIED", "REJECTED", name="verificationstatus"),
server_default="CREATED",
nullable=False,
),
sa.Column("system_comments", sa.Text(), nullable=True),
sa.Column(
"created_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.ForeignKeyConstraint(["host_id"], ["hosts.guid"], name="fk_host"),
sa.PrimaryKeyConstraint("guid"),
)
op.create_table(
"host_languages",
sa.Column("language_code", sa.String(length=2), nullable=True),
sa.Column("host_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column(
"guid",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.ForeignKeyConstraint(["host_id"], ["hosts.guid"], name="fk_host"),
sa.ForeignKeyConstraint(
["language_code"], ["languages.code2"], name="fk_language"
),
sa.PrimaryKeyConstraint("guid"),
sa.UniqueConstraint("language_code", "host_id", name="lang_host_pair_unique"),
)
op.create_table(
"guests",
sa.Column(
"guid",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("full_name", sa.String(length=255), nullable=False),
sa.Column("email", sa.String(length=255), nullable=False),
sa.Column("phone_number", sa.String(length=20), nullable=False),
sa.Column(
"is_agent", sa.Boolean(), server_default=sa.text("false"), nullable=False
),
sa.Column("document_number", sa.String(length=255), nullable=True),
sa.Column(
"people_in_group", sa.Integer(), server_default=sa.text("1"), nullable=False
),
sa.Column(
"adult_male_count",
sa.Integer(),
server_default=sa.text("0"),
nullable=False,
),
sa.Column(
"adult_female_count",
sa.Integer(),
server_default=sa.text("0"),
nullable=False,
),
sa.Column("children_ages", postgresql.ARRAY(sa.Integer()), nullable=False),
sa.Column(
"have_pets", sa.Boolean(), server_default=sa.text("false"), nullable=False
),
sa.Column("pets_description", sa.String(length=255), nullable=True),
sa.Column("special_needs", sa.Text(), nullable=True),
sa.Column("food_allergies", sa.Text(), nullable=True),
sa.Column(
"meat_free_diet",
sa.Boolean(),
server_default=sa.text("false"),
nullable=False,
),
sa.Column(
"gluten_free_diet",
sa.Boolean(),
server_default=sa.text("false"),
nullable=False,
),
sa.Column(
"lactose_free_diet",
sa.Boolean(),
server_default=sa.text("false"),
nullable=False,
),
sa.Column("finance_status", sa.String(length=255), nullable=True),
sa.Column("how_long_to_stay", sa.String(length=255), nullable=True),
sa.Column("desired_destination", sa.String(length=255), nullable=True),
sa.Column(
"priority_status",
sa.Enum(
"DOES_NOT_RESPOND",
"ACCOMMODATION_NOT_NEEDED",
"EN_ROUTE_UA",
"EN_ROUTE_PL",
"IN_KRK",
"AT_R3",
"ACCOMMODATION_FOUND",
"UPDATED",
name="guestprioritystatus",
),
nullable=True,
),
sa.Column(
"priority_date",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
nullable=True,
),
sa.Column("staff_comments", sa.Text(), nullable=True),
sa.Column(
"verification_status",
sa.Enum("CREATED", "VERIFIED", "REJECTED", name="verificationstatus"),
server_default="CREATED",
nullable=False,
),
sa.Column("system_comments", sa.Text(), nullable=True),
sa.Column(
"created_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"accommodation_unit_id", postgresql.UUID(as_uuid=True), nullable=True
),
sa.ForeignKeyConstraint(
["accommodation_unit_id"],
["accommodation_units.guid"],
name="fk_accommodation_unit_id",
),
sa.PrimaryKeyConstraint("guid"),
)
op.execute(
"""
INSERT INTO public.languages("name", code2, code3)
VALUES
('English', 'en', 'eng'),
('Ukrainian', 'uk', 'ukr'),
('Polish', 'pl', 'pol'),
('Russian', 'ru', 'rus');
"""
)
op.execute(
"""
DO
$$
BEGIN
IF NOT EXISTS (
SELECT FROM pg_catalog.pg_user
WHERE usename = 'apiserviceuser') THEN
CREATE USER ApiServiceUser WITH PASSWORD '<PASSWORD>';
ALTER DEFAULT PRIVILEGES IN SCHEMA public
GRANT select,insert,update,delete,truncate ON TABLES TO ApiServiceUser;
GRANT select,insert,update,delete,truncate ON ALL TABLES IN schema public TO ApiServiceUser;
REVOKE select,insert,update,delete,truncate ON public.languages from ApiServiceUser;
END IF;
END
$$;
"""
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("guests")
op.drop_table("host_languages")
op.drop_table("accommodation_units")
op.drop_table("teammembers")
op.drop_table("languages")
op.drop_table("hosts")
op.execute("DROP TYPE verificationstatus;")
op.execute("DROP TYPE voivodeship;")
op.execute("DROP TYPE guestprioritystatus;")
op.execute("DROP OWNED BY ApiServiceUser; DROP USER ApiServiceUser;")
# ### end Alembic commands ###
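# Hedged usage note (illustrative, not generated by Alembic): with a standard Alembic
# environment configured for this project, `alembic upgrade head` applies this revision by
# running upgrade() above, and `alembic downgrade -1` reverts it by running downgrade().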
|
StarcoderdataPython
|
9711539
|
import os
import subprocess
import threading
import pwd
from django.conf import settings
from django.utils.datetime_safe import datetime
from django.db.models import Q
from hujan_ui.installers.models import Server, Inventory, GlobalConfig, Deployment
from hujan_ui.utils.global_config_writer import GlobalConfigWriter
from hujan_ui.utils.host_editor import HostEditor
from hujan_ui.utils.multinode_writer import MultiNodeWriter
from hujan_ui.utils.core import demote
class Deployer:
log_dir = settings.DEPLOYMENT_LOG_DIR
deploy_command = settings.KOLLA_COMMAND_DEPLOY
post_deploy_command = settings.KOLLA_COMMAND_POST_DEPLOY
deploy_user = settings.USER_EXEC_KOLLA_COMMAND
def __init__(self, deployment_model=None):
if not deployment_model:
self.deployment_model = self.current_deployment()
else:
self.deployment_model = deployment_model
def current_deployment(self):
"""
Get currently running deployment model
"""
return Deployment.objects.order_by('-id').first()
def is_deploying(self):
"""
Check if deploying in progress
"""
return self.deployment_model is not None and self.deployment_model.status == Deployment.DEPLOY_IN_PROGRESS
def get_log(self, from_line=0):
"""
Get log lines from current deployment
"""
assert self.deployment_model is not None
if os.path.exists(self._log_file_path()):
with open(self._log_file_path(), 'r') as f:
lines = f.readlines()
return lines[from_line:]
else:
return []
def _prepare_log_dir(self):
"""
Create log dir if not exist
"""
if not os.path.exists(self.log_dir):
os.mkdir(self.log_dir)
def _write_log(self, line):
"""
Write line to log file of deployment_model
"""
with open(self._log_file_path(), 'a+') as f:
f.write(line)
def _log_file_path(self):
"""
Return absolute log file path of current deployment model
"""
return os.path.join(self.log_dir, self.deployment_model.log_name)
def _output_reader_deploy(self, proc):
"""
Read process output
"""
self._write_log("Process Started\n")
for line in iter(proc.stdout.readline, b''):
line_str = line.decode('utf-8')
self._write_log(line_str)
proc.wait()
return_code = proc.returncode
self._write_log(f"Process exited with return code: {return_code}\n")
if return_code == 0:
self.deployment_model.status = Deployment.DEPLOY_SUCCESS
self.deployment_model.save()
else:
self.deployment_model.status = Deployment.DEPLOY_FAILED
self.deployment_model.save()
def _output_reader_post_deploy(self, proc):
self._write_log("Process Post Started\n")
for line in iter(proc.stdout.readline, b''):
line_str = line.decode('utf-8')
self._write_log(line_str)
proc.wait()
return_code = proc.returncode
if return_code == 0:
self.deployment_model.status = Deployment.POST_DEPLOY_SUCCESS
self.deployment_model.save()
else:
self.deployment_model.status = Deployment.DEPLOY_FAILED
self.deployment_model.save()
self._write_log(f"Process post deploy exited with return code: {return_code}\n")
def _start_deploy(self):
uid = pwd.getpwnam(self.deploy_user).pw_uid
gid = pwd.getpwnam(self.deploy_user).pw_gid
proc_kolla = subprocess.Popen(self.deploy_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
preexec_fn=demote(uid, gid))
t = threading.Thread(target=self._output_reader_deploy, args=(proc_kolla,))
t.start()
return t
def _start_post_deploy(self):
uid = pwd.getpwnam(self.deploy_user).pw_uid
gid = pwd.getpwnam(self.deploy_user).pw_gid
proc = subprocess.Popen(self.post_deploy_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
preexec_fn=demote(uid, gid))
t = threading.Thread(target=self._output_reader_post_deploy, args=(proc,))
t.start()
return t
def _create_deployment(self):
"""
Create deployment model
"""
timestamp = datetime.now().strftime("%d%m%Y-%H%M%S")
self.deployment_model = Deployment(
log_name=f"deploy-log-{timestamp}.log",
status=Deployment.DEPLOY_IN_PROGRESS
)
self.deployment_model.save()
def _prepare_files(self):
"""
Prepare file before deployment
"""
HostEditor.save_from_model(Server.objects.all())
MultiNodeWriter.save_from_model(Inventory.objects.all())
GlobalConfigWriter.save_from_model(GlobalConfig.objects.first())
def deploy(self):
self._prepare_files()
self._prepare_log_dir()
self._create_deployment()
self._start_deploy()
def post_deploy(self):
self._prepare_log_dir()
self._start_post_deploy()
def reset(self):
cs = GlobalConfigWriter()
he = HostEditor()
mn = MultiNodeWriter()
he.clear()
he.save()
mn.clear()
cs.clear()
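# A minimal usage sketch added for illustration (not part of the original module); it assumes
# the Django settings and installer models imported above are configured, e.g. when called
# from a view or a management command.
def example_run_deploy():
    deployer = Deployer()
    if not deployer.is_deploying():
        deployer.deploy()         # writes hosts/multinode/globals files and starts the deploy thread
    return deployer.get_log()     # log lines written so far by the background reader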
|
StarcoderdataPython
|
6637299
|
<filename>nnblk/tree.py
import itertools
BLANK = ''
class Tree(object):
def __init__(self, operator, operand):
self.operator = operator
self.operand = operand
self.parent = None
self.children = list()
def add_child(self, child):
child.parent = self
self.children.append(child)
def visit(self):
for c in self.children:
yield from c.visit()
yield self
def __len__(self):
count = 1
for child in self.children:
count += len(child)
return count
def depth(self):
count = 1
for child in self.children:
count = max(count, 1 + child.depth())
return count
def pad(self, depth):
if depth < 1:
raise ValueError()
if depth == 1:
assert self.is_leaf()
return
if self.is_leaf():
self.add_child(Tree(BLANK, BLANK))
self.add_child(Tree(BLANK, BLANK))
for c in self.children:
c.pad(depth - 1)
return self
def prune(self):
for node in self.visit():
if node.operator == BLANK and node.operand == BLANK:
parent = node.parent
parent.children = [c for c in parent.children if c is not node]
def is_null(self):
return len(self.operator) == 0 and len(self.operand) == 0
def is_leaf(self):
return all([c.is_null() for c in self.children])
def __str__(self):
rep = '%s:%s' % (self.operator, self.operand)
if len(self.children) == 0:
return rep
return '(' + ', '.join([rep] + [c.__str__() for c in self.children]) + ')'
BLANK_NODE = Tree(BLANK, BLANK)
def merge_tree(trees):
ret = Tree(operator=[n.operator for n in trees], operand=[n.operand for n in trees])
for subtrees in itertools.zip_longest(*[n.children for n in trees], fillvalue=BLANK_NODE):
ret.add_child(merge_tree(subtrees))
return ret
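# A small usage sketch added for illustration (not part of the original module), exercising the
# Tree API defined above; the operator/operand strings are arbitrary examples.
def example_usage():
    root = Tree('+', 'add')
    root.add_child(Tree('x', '1'))
    root.add_child(Tree('y', '2'))
    root.pad(3)                        # pad every leaf with BLANK children up to depth 3
    merged = merge_tree([root, root])  # element-wise merge of two identically shaped trees
    return len(root), root.depth(), str(merged)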
|
StarcoderdataPython
|
368815
|
# SecurityGroupCorrector.py || part of ZocSec.SecurityAsCode.AWS
#
# An AWS Lambda for removing security groups that expose sensitive ports to the entire Internet
#
# Owner: Copyright © 2018 Zocdoc Inc. www.zocdoc.com
# Author: <NAME> @veggiespam
#
import boto3
from botocore.exceptions import ClientError
import json
def lambda_handler(event, context):
print("Event is: ", json.dumps(event))
source = event['detail']['eventSource']
if source != "ec2.amazonaws.com":
# wrong caller, just silently return
print("Wrong Filter on CT Events, source=", source)
return
allowed_event_list = [
'CreateSecurityGroup',
"AuthorizeSecurityGroupIngress"
]
event_name = event['detail']['eventName']
if not(event_name in allowed_event_list):
# wrong event, just silently return
print("Wrong Filter on CT Events, source=", source, " / event_name=", event_name)
return
resp = event['detail']['responseElements']
if (resp["_return"] != True):
# event was not a successful update, so we can ignore it.
print("event was not a successful update, so we can ignore it")
return
SG_id = 'invalid'
if event_name == 'CreateSecurityGroup':
SG_id = resp["groupId"]
elif event_name == 'AuthorizeSecurityGroupIngress':
SG_id = event['detail']['requestParameters']['groupId']
else:
# We shouldn't actually get here.
return
print("groupID = ", SG_id)
ec2 = boto3.resource('ec2')
security_group = ec2.SecurityGroup(SG_id)
    sensitive_ports = [22, 3389, 54321]  # your sensitive ports
ingress_list = security_group.ip_permissions
for perm in ingress_list:
# print(json.dumps(perm))
fromport=0
toport=0
ipprot=0
sensitive=False
if 'FromPort' in perm:
fromport = perm['FromPort']
if 'ToPort' in perm:
toport = perm['ToPort']
if 'IpProtocol' in perm:
ipprot = perm['IpProtocol']
if ipprot == "-1":
sensitive = True
# print("F:",fromport," T:",toport)
if fromport > 0:
for p in sensitive_ports:
if fromport <= p and p <= toport:
sensitive = True
if sensitive:
for r in perm['IpRanges']:
# this could be more complex, but 0000/0 catches 90% of the cases
if r['CidrIp'] == "0.0.0.0/0":
print("Ingress Rule violates policy, removed: ", json.dumps(perm))
try:
security_group.revoke_ingress(
CidrIp = r['CidrIp'],
IpProtocol = perm['IpProtocol'],
FromPort = fromport,
ToPort = toport,
# , DryRun = True
)
except ClientError as e:
if 'DryRunOperation' not in str(e):
print("Error: ", e)
raise
else:
print('DryRun: ', e)
print(json.dumps(ingress_list))
return
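# Local smoke-test sketch added for illustration (not part of the original Lambda). The event
# below contains only the fields the handler reads, and the group id is a placeholder; running
# it for real requires AWS credentials with EC2 permissions.
if __name__ == '__main__':
    sample_event = {
        'detail': {
            'eventSource': 'ec2.amazonaws.com',
            'eventName': 'AuthorizeSecurityGroupIngress',
            'responseElements': {'_return': True},
            'requestParameters': {'groupId': 'sg-0123456789abcdef0'},
        }
    }
    lambda_handler(sample_event, None)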
|
StarcoderdataPython
|
6474655
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int16
class seigyo(object):
def __init__(self):
self._pub_control = rospy.Publisher('/control', Int16, queue_size=1)
while(1):
self._ren()
def _ren(self):
print "1: start, 0: prepare"
s = input()
control = Int16()
control.data = s
self._pub_control.publish(control)
if __name__ == '__main__':
rospy.init_node('seigyo')
seigyo = seigyo()
try:
rospy.spin()
except KeyboardInterrupt:
pass
|
StarcoderdataPython
|
8192900
|
<reponame>reiv/cachelib
import itertools
import pytest
from cachelib import ARCache
def identity(x):
return x
@pytest.fixture
def on_evict():
from collections import Counter
c = Counter()
def callback(key):
c[key] += 1
callback.count = c
return callback
class TestARC:
def test_arc(self):
# Adapted from doctests in:
# http://code.activestate.com/recipes/576532-adaptive-replacement-cache-in-python/
arc = ARCache(maxsize=10, get_missing=identity)
test_sequence = itertools.chain(
range(20), range(11, 15), range(20), range(11, 40),
(39, 38, 37, 36, 35, 34, 33, 32, 16, 17, 11, 41))
for x in test_sequence:
arc[x]
assert list(arc.t1) == [41]
assert list(arc.t2) == [11, 17, 16, 32, 33, 34, 35, 36, 37]
assert list(arc.b1) == [31, 30]
assert list(arc.b2) == [38, 39, 19, 18, 15, 14, 13, 12]
assert int(arc.p) == 5
assert 41 in arc
assert 32 in arc
assert 30 not in arc
assert 19 not in arc
assert arc.size == 10
def test_delitem(self):
arc = ARCache(maxsize=10, get_missing=identity)
with pytest.raises(KeyError):
del arc[42]
for x in range(10):
arc[x]
for x in range(10):
arc[x]
arc[11]
with pytest.raises(KeyError):
del arc[0]
for x in range(1, 10):
del arc[x]
del arc[11]
assert arc.size == 0
def test_eviction(self, on_evict):
arc = ARCache(maxsize=10, get_missing=identity, on_evict=on_evict)
count = on_evict.count
for x in range(10):
arc[x]
for x in range(1, 10):
arc[x]
arc[11]
assert count[0] == 1
assert 0 not in arc
assert 1 in arc
arc[12]
assert 11 not in arc
assert count[11] == 1
del arc[8]
assert count[8] == 0
assert arc.size == 9
|
StarcoderdataPython
|
3540065
|
# Empty file
from cemc.ce_calculator import CE, get_atoms_with_ce_calc
from cemc.ce_calculator import get_atoms_with_ce_calc_JSON
from cemc.timed_test_logging import TimeLoggingTestRunner
|
StarcoderdataPython
|
365869
|
<filename>py/torch_tensorrt/_util.py
from torch_tensorrt import __version__
from torch_tensorrt import _C
def dump_build_info():
"""Prints build information about the TRTorch distribution to stdout
"""
print(get_build_info())
def get_build_info() -> str:
"""Returns a string containing the build information of TRTorch distribution
Returns:
str: String containing the build information for TRTorch distribution
"""
build_info = _C.get_build_info()
build_info = "TRTorch Version: " + str(__version__) + '\n' + build_info
return build_info
def set_device(gpu_id):
_C.set_device(gpu_id)
|
StarcoderdataPython
|
3591860
|
<gh_stars>0
print("Day 1 - Python print Function")
print("The Function is declared like this :")
print("print('what to print')") # Practice makes men perfect
# Just like you train your muscle
# The data in Double qoutes "" is called Strings not code
# The single qoutes are almost the same as double qoutes
# The Great Programmers are great at finding errors and fix them
|
StarcoderdataPython
|
6682899
|
response = {
"result": {
"result": {
"order0": {
"ID": "35943",
"TITLE": "7-я Кожуховская ул., 4К1: 104%, 3.6, эт. 12/16, 10.5 -> 16.3 (от собственника)",
},
"order1": {
"ID": "161",
"TITLE": "преображенская площадь: 26.8 - 8.9 = 17.9 (201%)",
},
"order2": {
"ID": "171",
"TITLE": "вднх: 8.1 - 4.7 = 3.4 (71%)",
},
},
"result_error": [],
"result_total": [],
"result_next": [],
"result_time": {
"order0": {
"start": 1653240894.323743,
"finish": 1653240894.364974,
"duration": 0.04123091697692871,
"processing": 0.0411381721496582,
"date_start": "2022-05-22T20:34:54+03:00",
"date_finish": "2022-05-22T20:34:54+03:00",
"operating": 2.6955912113189697,
},
"order1": {
"start": 1653240894.36503,
"finish": 1653240894.369708,
"duration": 0.004678010940551758,
"processing": 0.004612922668457031,
"date_start": "2022-05-22T20:34:54+03:00",
"date_finish": "2022-05-22T20:34:54+03:00",
"operating": 0,
},
"order2": {
"start": 1653240894.369754,
"finish": 1653240894.374263,
"duration": 0.00450897216796875,
"processing": 0.004456043243408203,
"date_start": "2022-05-22T20:34:54+03:00",
"date_finish": "2022-05-22T20:34:54+03:00",
"operating": 0,
},
},
},
"time": {
"start": 1653240894.282294,
"finish": 1653240894.374298,
"duration": 0.09200406074523926,
"processing": 0.05059194564819336,
"date_start": "2022-05-22T20:34:54+03:00",
"date_finish": "2022-05-22T20:34:54+03:00",
"operating": 2.6955912113189697,
},
}
|
StarcoderdataPython
|
3496534
|
import serial,time
class get_co_data:
def __init__(self, device='/dev/ttyS0', baudrate=9600, timeout=1):
self.ser = serial.Serial(device, baudrate=baudrate, timeout=timeout)
self.ser.flush()
def repeat_get_data(self):
float_values = ['ADC_In', 'Voltage_ADC', 'Resistance_RS', 'Ratio (RS/R0)', 'PPM']
while True:
if self.ser.in_waiting > 0:
line = self.ser.readline().decode('utf-8').rstrip()
items = [it.strip() for it in line.split("|")]
items = [it for it in items if it!=""]
if len(items)==9:
items = dict(zip(
['ADC_In', 'Equation_V_ADC', 'Voltage_ADC', 'Equation_RS', 'Resistance_RS', 'EQ_Ratio', 'Ratio (RS/R0)', 'Equation_PPM', 'PPM'],
items
))
items['time'] = time.ctime()
items['timestamp'] = time.time()
items['orig_line'] = line
for key in float_values:
items[key] = float(items[key])
yield items
else:
continue
if __name__ == '__main__':
getco = get_co_data()
data = getco.repeat_get_data()
for i in range(10):
f = next(data)
print(f)
|
StarcoderdataPython
|
1911938
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 13:45:39 2020
the version of loompy should be the consistent with velocyto; otherwise error returned
@author: jingkui.wang
"""
import loompy
import glob
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S117008_R9533/LOOMS/*.loom")
loompy.combine(files,
"/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S117008_R9533/LOOMS/S117008_R9533_merged.loom")
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S117007_R9533/LOOMS/*.loom")
loompy.combine(files,
"/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S117007_R9533/LOOMS/S117007_R9533_merged.loom")
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S117009_R9533/LOOMS/*.loom")
loompy.combine(files,
"/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S117009_R9533/LOOMS/S117009_R9533_merged.loom")
# folder S124890_R9968
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S124890_R9968/LOOMS/*.loom")
loompy.combine(files,
"/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S124890_R9968/LOOMS/S124890_R9968_merged.loom")
# folder S124889_R9968
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S124889_R9968/LOOMS/*.loom")
loompy.combine(files,
"/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S124889_R9968/LOOMS/S124889_R9968_merged.loom")
# folder S124891_R9968
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S124891_R9968/LOOMS/*.loom")
loompy.combine(files,
"/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/S124891_R9968/LOOMS/S124891_R9968_merged.loom")
# here combine merged loom files from all folders
files = glob.glob("/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/*/LOOMS/*merged.loom")
loompy.combine(files, '/Volumes/groups/cochella/git_aleks_jingkui/scRNAseq_MS_lineage/data/raw_ngs_data/all_merged.loom')
loompy.combine(files, '/Users/jiwang/workspace/imp/scRNAseq_MS_lineage_dev/data/velocyto_all_merged.loom')
ds = loompy.connect('/Users/jiwang/workspace/imp/scRNAseq_MS_lineage_dev/data/velocyto_all_merged.loom')
ds.shape
|
StarcoderdataPython
|
335196
|
#!/usr/bin/env python3
from itertools import combinations
from hashlib import md5
from numpy import lcm
from collections import defaultdict
from AoCUtils import getInts
COORDS = {'x', 'y', 'z'}
def sumVelToPos(planets):
for p in planets:
for v in COORDS:
p['pos'][v] += p['vel'][v]
def hashPlanets(planets, cord):
a = []
for p in planets:
a.append(str(p['pos'][cord]))
a.append(str(p['vel'][cord]))
h = ' '.join(a)
hashed = md5(h.encode()).hexdigest()
return bytes(hashed, 'utf-8')
def getEnergy(planet):
pot = sum([ abs(v) for k,v in planet['pos'].items()])
kin = sum([ abs(v) for k,v in planet['vel'].items()])
return pot*kin
def simulation(planets, pairs, steps, p2=False):
if p2:
found = set()
hashes = defaultdict(set)
cords = {'x':None, 'y':None, 'z':None}
for s in range(steps):
for p in pairs:
for c in COORDS:
if planets[p[0]]['pos'][c] > planets[p[1]]['pos'][c]:
planets[p[0]]['vel'][c] -= 1
planets[p[1]]['vel'][c] += 1
elif planets[p[0]]['pos'][c] < planets[p[1]]['pos'][c]:
planets[p[0]]['vel'][c] += 1
planets[p[1]]['vel'][c] -= 1
sumVelToPos(planets)
if p2:
if len(found) != 3:
for c in COORDS:
h = hashPlanets(planets, c)
if h in hashes[c] and c not in found:
print("planets have the same pos and vol at step {} for {}".format(s, c))
cords[c] = s
found.add(c)
else:
hashes[c].add(h)
else:
return cords
def main():
data = getInts('input')
planets = list()
i = 0
for planet in data:
a = {}
a['pos'] = dict(zip(['x','y','z'], planet ))
a['vel'] = {'x':0, 'y':0, 'z':0}
a['id'] = i
planets.append( a )
i+= 1
pairs = list(combinations(range(len(planets)), 2))
planets2 = planets.copy()
# part 1
simulation(planets, pairs, 1000)
p1 = sum([ getEnergy(x) for x in planets ])
print("p1: {}".format(p1))
# part 2
p2_s = simulation(planets2, pairs, 9999999999, True)
p2 = lcm.reduce([x for x in p2_s.values()])
print("p2: {}".format(p2))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1671026
|
<filename>butterfly/complex_utils.py
''' Utility functions for handling complex tensors: conjugate and complex_mul.
Pytorch (as of 1.0) does not support complex tensors, so we store them as
float tensors where the last dimension is 2 (real and imaginary parts).
'''
import numpy as np
import torch
from torch.utils.dlpack import to_dlpack, from_dlpack
# For now, it seems that the overhead of converting to cupy makes the cupy
# version slower than the torch version. So I'll just disable cupy.
# Check if cupy is available
# if torch.cuda.is_available():
# use_cupy = True
# try:
# import cupy as cp
# except:
# use_cupy = False
# print("Cupy isn't installed or isn't working properly. Will use Pytorch's complex multiply, which is slower.")
# else:
# use_cupy = False
use_cupy = False
def torch2numpy(X):
"""Convert a torch float32 tensor to a numpy array, sharing the same memory.
"""
return X.detach().numpy()
def torch2cupy(tensor):
return cp.fromDlpack(to_dlpack(tensor.cuda()))
def cupy2torch(tensor):
return from_dlpack(tensor.toDlpack())
def real_to_complex(X):
"""A version of X that's complex (i.e., last dimension is 2).
Parameters:
X: (...) tensor
Return:
X_complex: (..., 2) tensor
"""
return torch.stack((X, torch.zeros_like(X)), dim=-1)
def conjugate_torch(X):
assert X.shape[-1] == 2, 'Last dimension must be 2'
return X * torch.tensor((1, -1), dtype=X.dtype, device=X.device)
class Conjugate(torch.autograd.Function):
'''X is a complex64 tensors but stored as float32 tensors, with last dimension = 2.
'''
@staticmethod
def forward(ctx, X):
assert X.shape[-1] == 2, 'Last dimension must be 2'
if X.is_cuda:
if use_cupy:
# TODO: do we need .contiguous here? I think it doesn't work if the last dimension isn't contiguous
return cupy2torch(torch2cupy(X).view('complex64').conj().view('float32'))
else:
return conjugate_torch(X)
else:
return torch.from_numpy(np.ascontiguousarray(torch2numpy(X)).view('complex64').conj().view('float32'))
@staticmethod
def backward(ctx, grad):
return Conjugate.apply(grad)
conjugate = Conjugate.apply
def complex_mul_torch(X, Y):
assert X.shape[-1] == 2 and Y.shape[-1] == 2, 'Last dimension must be 2'
return torch.stack(
(X[..., 0] * Y[..., 0] - X[..., 1] * Y[..., 1],
X[..., 0] * Y[..., 1] + X[..., 1] * Y[..., 0]),
dim=-1)
def complex_mul_numpy(X, Y):
assert X.shape[-1] == 2 and Y.shape[-1] == 2, 'Last dimension must be 2'
X_np = np.ascontiguousarray(torch2numpy(X)).view('complex64')
Y_np = np.ascontiguousarray(torch2numpy(Y)).view('complex64')
return torch.from_numpy((X_np * Y_np).view('float32'))
class ComplexMul(torch.autograd.Function):
'''X and Y are complex64 tensors but stored as float32 tensors, with last dimension = 2.
'''
@staticmethod
def forward(ctx, X, Y):
assert X.shape[-1] == 2 and Y.shape[-1] == 2, 'Last dimension must be 2'
ctx.save_for_backward(X, Y)
if X.is_cuda:
assert Y.is_cuda, 'X and Y must both be torch.cuda.FloatTensor'
if use_cupy:
# TODO: do we need .contiguous here? I think it doesn't work if the last dimension isn't contiguous
return cupy2torch((torch2cupy(X).view('complex64') * torch2cupy(Y).view('complex64')).view('float32'))
else:
return complex_mul_torch(X, Y)
else:
assert not Y.is_cuda, 'X and Y must both be torch.FloatTensor'
X_np = np.ascontiguousarray(torch2numpy(X)).view('complex64')
Y_np = np.ascontiguousarray(torch2numpy(Y)).view('complex64')
return torch.from_numpy((X_np * Y_np).view('float32'))
@staticmethod
def backward(ctx, grad):
X, Y = ctx.saved_tensors
grad_X, grad_Y = ComplexMul.apply(grad, conjugate(Y)), ComplexMul.apply(grad, conjugate(X))
# Need to sum over dimensions that were broadcasted
dims_to_sum_X = [-i for i in range(1, X.dim() + 1) if X.shape[-i] != grad.shape[-i]]
dims_to_sum_Y = [-i for i in range(1, Y.dim() + 1) if Y.shape[-i] != grad.shape[-i]]
if dims_to_sum_X: # If empty list is passed to sum, it sums all the dimensions
grad_X = grad_X.sum(dim=dims_to_sum_X, keepdim=True)
if dims_to_sum_Y: # If empty list is passed to sum, it sums all the dimensions
grad_Y = grad_Y.sum(dim=dims_to_sum_Y, keepdim=True)
if grad.dim() > X.dim():
grad_X = grad_X.sum(tuple(range(grad.dim() - X.dim())))
if grad.dim() > Y.dim():
grad_Y = grad_Y.sum(tuple(range(grad.dim() - Y.dim())))
return grad_X, grad_Y
complex_mul = ComplexMul.apply
def test_complex_mul():
n = 5
m = 7
p = 4
X = torch.rand(m, 1, n, 2, requires_grad=True).transpose(0, 2) # Transpose to test non-contiguous arrays
Y = torch.rand(m, p, 2, requires_grad=True).transpose(0, 1)
Z = complex_mul(X, Y)
Z_torch = complex_mul_torch(X, Y)
assert Z.shape == (n, p, m, 2)
assert torch.allclose(Z, Z_torch)
g = torch.rand_like(Z)
dX, dY = torch.autograd.grad(Z, (X, Y), g)
dX_torch, dY_torch = torch.autograd.grad(Z_torch, (X, Y), g)
assert torch.allclose(dX, dX_torch)
assert torch.allclose(dY, dY_torch)
if torch.cuda.is_available():
X, Y = X.cuda(), Y.cuda()
Z = complex_mul(X, Y)
Z_torch = complex_mul_torch(X, Y)
assert Z.shape == (n, p, m, 2)
assert torch.allclose(Z, Z_torch)
g = torch.rand_like(Z)
dX, dY = torch.autograd.grad(Z, (X, Y), g)
dX_torch, dY_torch = torch.autograd.grad(Z_torch, (X, Y), g)
assert torch.allclose(dX, dX_torch)
assert torch.allclose(dY, dY_torch)
def complex_matmul_torch(X, Y):
"""Multiply two complex matrices.
Parameters:
X: (..., n, m, 2)
Y: (..., m, p, 2)
Return:
Z: (..., n, p, 2)
"""
return complex_mul_torch(X.unsqueeze(-2), Y.unsqueeze(-4)).sum(dim=-3)
class ComplexMatmulNp(torch.autograd.Function):
"""Multiply two complex matrices, in numpy.
Parameters:
X: (n, m, 2)
Y: (m, p, 2)
Return:
Z: (n, p, 2)
"""
@staticmethod
def forward(ctx, X, Y):
ctx.save_for_backward(X, Y)
X_np = np.ascontiguousarray(torch2numpy(X)).view('complex64').squeeze(-1)
Y_np = np.ascontiguousarray(torch2numpy(Y)).view('complex64').squeeze(-1)
prod = torch.from_numpy((X_np @ Y_np)[..., None].view('float32'))
return prod
@staticmethod
def backward(ctx, grad):
X, Y = ctx.saved_tensors
X_np = X.detach().contiguous().numpy().view('complex64').squeeze(-1)
Y_np = Y.detach().contiguous().numpy().view('complex64').squeeze(-1)
grad_np = grad.detach().contiguous().numpy().view('complex64').squeeze(-1)
dX = torch.from_numpy(np.expand_dims(grad_np @ Y_np.conj().T, -1).view('float32'))
dY = torch.from_numpy(np.expand_dims(X_np.conj().T @ grad_np, -1).view('float32'))
return dX, dY
complex_matmul = ComplexMatmulNp.apply
def test_complex_mm():
n = 5
m = 7
p = 4
X = torch.rand(n, m, 2, requires_grad=True)
Y = torch.rand(m, p, 2, requires_grad=True)
Z = complex_matmul(X, Y)
assert Z.shape == (n, p, 2)
batch_size = 3
# X = torch.rand(batch_size, n, m, 2)
# Y = torch.rand(batch_size, m, p, 2)
# Z = complex_matmul(X, Y)
# assert Z.shape == (batch_size, n, p, 2)
X_np = X.detach().contiguous().numpy().view('complex64').squeeze(-1)
Y_np = Y.detach().contiguous().numpy().view('complex64').squeeze(-1)
Z_np = np.expand_dims(X_np @ Y_np, axis=-1).view('float32')
assert np.allclose(Z.detach().numpy(), Z_np)
Z_torch = complex_matmul_torch(X, Y)
assert torch.allclose(Z, Z_torch)
g = torch.rand_like(Z)
dX, dY = torch.autograd.grad(Z, (X, Y), g)
dX_torch, dY_torch = torch.autograd.grad(Z_torch, (X, Y), g)
assert torch.allclose(dX, dX_torch)
assert torch.allclose(dY, dY_torch)
if __name__ == '__main__':
test_complex_mul()
test_complex_mm()
|
StarcoderdataPython
|
6400848
|
<reponame>kairu-ms/autorest.az
#!/usr/bin/env python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import sys
if not sys.version_info >= (3, 6, 0):
raise Exception(
"Autorest for Python extension requires Python 3.6 at least")
try:
import pip
except ImportError:
raise Exception("Your Python installation doesn't have pip available")
try:
import venv
except ImportError:
raise Exception("Your Python installation doesn't have venv available")
# Now we have pip and Py >= 3.6, go to work
import subprocess
from pathlib import Path
from venvtools import ExtendedEnvBuilder, python_run
_ROOT_DIR = Path(__file__).parent.parent.parent.parent
def main():
venv_path = _ROOT_DIR / "venv"
venv_prexists = venv_path.exists()
if venv_prexists:
env_builder = venv.EnvBuilder(with_pip=True)
venv_context = env_builder.ensure_directories(venv_path)
else:
env_builder = ExtendedEnvBuilder(with_pip=True)
env_builder.create(venv_path)
venv_context = env_builder.context
python_run(venv_context, "pip", ["install", "-U", "pip"])
python_run(venv_context, "pip", ["install", "-r", os.path.join("dist", "src", "python", "requirements.txt")])
def lint(filename):
venv_path = _ROOT_DIR / "venv"
venv_prexists = venv_path.exists()
if not venv_prexists:
main()
env_builder = venv.EnvBuilder(with_pip=True)
venv_context = env_builder.ensure_directories(venv_path)
python_run(venv_context, "pip", ["install", "-U", "pip"])
python_run(venv_context, "pip", ["install", "-r", os.path.join("dist", "src", "python", "requirements.txt")])
# black --line-length=120 --experimental-string-processing --skip-string-normalization
# autopep8 --global-config '.pyproject.toml' --in-place --max-line-length=120 --ignore="E203,E501,W6"
# autoflake --in-place --expand-star-imports --remove-all-unused-imports --remove-duplicate-keys --remove-unused-variables
python_run(venv_context, "black", [
"--line-length=120", "--experimental-string-processing", "--skip-string-normalization", filename])
python_run(venv_context, "autopep8", [
"--in-place", '--exclude=".git,__pycache__"', "--max-line-length=120", '--ignore="E203,E501,W6"', filename])
python_run(venv_context, "autoflake", ["--in-place", "--expand-star-imports",
"--remove-all-unused-imports", "--remove-duplicate-keys", "--remove-unused-variables", filename])
if __name__ == "__main__":
if len(sys.argv) < 2:
main()
elif os.path.exists(sys.argv[1]):
lint(sys.argv[1])
|
StarcoderdataPython
|
1782544
|
<gh_stars>10-100
#=========================================================================
# RegincrNstage_test
#=========================================================================
import collections
import pytest
from random import sample
from pymtl import *
from pclib.test import run_test_vector_sim, mk_test_case_table
from RegIncrNstage import RegIncrNstage
#-------------------------------------------------------------------------
# mk_test_vector_table
#-------------------------------------------------------------------------
def mk_test_vector_table( nstages, inputs ):
inputs.extend( [0]*nstages )
test_vector_table = [ ('in_ out*') ]
last_results = collections.deque( ['?']*nstages )
for input_ in inputs:
test_vector_table.append( [ input_, last_results.popleft() ] )
last_results.append( Bits( 8, input_ + nstages, trunc=True ) )
return test_vector_table
#-------------------------------------------------------------------------
# Parameterized Testing with Test Case Table
#-------------------------------------------------------------------------
test_case_table = mk_test_case_table([
( "nstages inputs "),
[ "2stage_small", 2, [ 0x00, 0x03, 0x06 ] ],
[ "2stage_large", 2, [ 0xa0, 0xb3, 0xc6 ] ],
[ "2stage_overflow", 2, [ 0x00, 0xfe, 0xff ] ],
[ "2stage_random", 2, sample(range(0xff),20) ],
[ "3stage_small", 3, [ 0x00, 0x03, 0x06 ] ],
[ "3stage_large", 3, [ 0xa0, 0xb3, 0xc6 ] ],
[ "3stage_overflow", 3, [ 0x00, 0xfe, 0xff ] ],
[ "3stage_random", 3, sample(range(0xff),20) ],
])
@pytest.mark.parametrize( **test_case_table )
def test( test_params, dump_vcd ):
nstages = test_params.nstages
inputs = test_params.inputs
run_test_vector_sim( RegIncrNstage( nstages ),
mk_test_vector_table( nstages, inputs ), dump_vcd )
#-------------------------------------------------------------------------
# Parameterized Testing with nstages = [ 1, 2, 3, 4, 5, 6 ]
#-------------------------------------------------------------------------
@pytest.mark.parametrize( "n", [ 1, 2, 3, 4, 5, 6 ] )
def test_random( n, dump_vcd ):
run_test_vector_sim( RegIncrNstage( nstages=n ),
mk_test_vector_table( n, sample(range(0xff),20) ), dump_vcd )
|
StarcoderdataPython
|
11286695
|
# Time: O(n) on average
# Space: O(1)
# 973
# We have a list of points on the plane. Find the K closest points to the origin (0, 0).
# (Here, the distance between two points on a plane is the Euclidean distance.)
#
# You may return the answer in any order. The answer is guaranteed to be unique (except for
# the order that it is in.)
# Solution 1: quick select
# We want an algorithm faster than NlogN. Clearly, the only way to do this is to use the fact that
# the K elements returned can be in any order -- otherwise we would be sorting which is at least NlogN.
#
# Say we choose some random element x = A[i] and split the array into two buckets: one bucket of all
# the elements less than x, and another bucket of all the elements greater than or equal to x.
# This is known as "quickselecting by a pivot x".
#
# The idea is that if we quickselect by some pivot, on average in linear time we'll reduce the problem
# to a problem of half the size.
#
# Algorithm
# Let's do the work(i, j, K) of partially sorting the subarray (points[i], points[i+1], ..., points[j])
# so that the smallest K elements of this subarray occur in the first K positions (i, i+1, ..., i+K-1).
#
# First, we quickselect by a random pivot element from the subarray. To do this in place, we have two pointers
# i and j, and move these pointers to the elements that are in the wrong bucket -- then, we swap these elements.
#
# After, we have two buckets [oi, i] and [i+1, oj], where (oi, oj) are the original (i, j) values when
# calling work(i, j, K). Say the first bucket has 10 items and the second bucket has 15 items. If we were
# trying to partially sort say, K = 5 items, then we only need to partially sort the first bucket: work(oi, i, 5).
# Otherwise, if we were trying to partially sort say, K = 17 items, then the first 10 items are already
# partially sorted, and we only need to partially sort the next 7 items: work(i+1, oj, 7).
from random import randint
class Solution(object):
def kClosest(self, points, K): # recursion, leetcodeOfficial: 1400 ms
"""
:type points: List[List[int]]
:type K: int
:rtype: List[List[int]]
"""
dist = lambda i: points[i][0]**2 + points[i][1]**2
def sort(i, j, K):
# Partially sorts A[i:j+1] so the first K elements are
# the smallest K elements.
if i >= j: return
# Put random element as A[i] - this is the pivot
k = randint(i, j)
points[i], points[k] = points[k], points[i]
mid = partition(i, j)
if K < mid - i + 1:
sort(i, mid - 1, K)
elif K > mid - i + 1:
sort(mid + 1, j, K - (mid - i + 1))
def partition(i, j):
# Partition by pivot A[i], returning an index mid
# such that A[i] <= A[mid] <= A[j] for i < mid < j.
oi = i
pivot = dist(i)
i += 1
while True:
while i < j and dist(i) < pivot:
i += 1
while i <= j and dist(j) >= pivot:
j -= 1
if i >= j: break
points[i], points[j] = points[j], points[i]
points[oi], points[j] = points[j], points[oi] # points[j] always < pivot dist, so swap to the beginning
return j
sort(0, len(points) - 1, K)
return points[:K]
def kClosest_iteration(self, points, K):
def dist(point):
return point[0]**2 + point[1]**2
def kthElement(k, compare):
def PartitionAroundPivot(left, right, pivot_idx, compare):
new_pivot_idx = left
points[pivot_idx], points[right] = points[right], points[pivot_idx]
for i in xrange(left, right):
if compare(points[i], points[right]):
points[i], points[new_pivot_idx] = points[new_pivot_idx], points[i]
new_pivot_idx += 1
points[right], points[new_pivot_idx] = points[new_pivot_idx], points[right]
return new_pivot_idx
left, right = 0, len(points) - 1
while left <= right:
pivot_idx = randint(left, right)
new_pivot_idx = PartitionAroundPivot(left, right, pivot_idx, compare)
if new_pivot_idx == k:
return
elif new_pivot_idx > k:
right = new_pivot_idx - 1
else: # new_pivot_idx < k.
left = new_pivot_idx + 1
kthElement(K-1, lambda a, b: dist(a) < dist(b))
return points[:K]
# Time: O(nlogk)
# Space: O(k)
import heapq
class Solution2(object): # USE THIS 400 ms
def kClosest(self, points, K):
"""
:type points: List[List[int]]
:type K: int
:rtype: List[List[int]]
"""
def dist(point):
return point[0]**2 + point[1]**2
max_heap = []
for point in points:
heapq.heappush(max_heap, (-dist(point), point))
if len(max_heap) > K:
heapq.heappop(max_heap)
return [x[1] for x in max_heap]
# Time: O(nlogn)
# Space: O(1)
class Solution3(object):
def kClosest(self, points, K):
points.sort(key=lambda p: p[0]**2 + p[1]**2)
return points[:K]
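# Quick sanity check (illustrative, added for this writeup; not part of the original solutions):
if __name__ == "__main__":
    pts = [[1, 3], [-2, 2], [5, 8], [0, 1]]
    # the two points closest to the origin are [0, 1] (dist 1) and [-2, 2] (dist 8)
    print(sorted(Solution2().kClosest([p[:] for p in pts], 2)))  # -> [[-2, 2], [0, 1]]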
|
StarcoderdataPython
|
8116995
|
<filename>tests/cluster/test_cluster_utlis.py<gh_stars>10-100
from cluster_setup import *
def test_sizes_from_labels():
labels = jnp.array([0, 0, 1, 1, 2, 2])
sizes = cluster.sizes_from_labels_jit(labels, 3)
assert_array_equal(sizes, jnp.array([2, 2, 2]))
def test_start_end_indices():
sizes = jnp.array([4, 4])
starts, ends = cluster.start_end_indices(sizes)
assert_array_equal(starts, jnp.array([0, 4]))
assert_array_equal(ends, jnp.array([4, 8]))
def test_labels_from_sizes():
sizes = jnp.array([2, 2])
labels = cluster.labels_from_sizes(sizes)
assert_array_equal(labels, jnp.array([0, 0, 1, 1]))
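# Note (added interpretation based on the tests below): best_map / best_map_k relabel the
# predicted clusters to best align with the true labels via an optimal assignment, so a
# pure permutation of the ground-truth labels maps back to it exactly.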
def test_best_map():
true_labels = jnp.array([0, 0, 1, 1])
pred_labels = jnp.array([1, 1, 0, 0])
mapped_labels, cols, G = cluster.best_map(true_labels, pred_labels)
assert_array_equal(mapped_labels, true_labels)
def test_best_map_k():
true_labels = jnp.array([0, 0, 1, 1])
pred_labels = jnp.array([1, 1, 0, 0])
mapped_labels, cols, G = cluster.best_map_k(true_labels, pred_labels, 2)
assert_array_equal(mapped_labels, true_labels)
def test_cluster_error():
true_labels = jnp.array([0, 0, 1, 1])
pred_labels = jnp.array([1, 1, 0, 0])
error = cluster.clustering_error(true_labels, pred_labels)
assert error.error == 0
assert error.error_perc == 0
s = str(error)
def test_cluster_error_k():
true_labels = jnp.array([0, 0, 1, 1])
pred_labels = jnp.array([1, 1, 0, 0])
error = cluster.clustering_error_k(true_labels, pred_labels, 2)
assert error.error == 0
assert error.error_perc == 0
|
StarcoderdataPython
|
1728898
|
<reponame>pulumi/pulumi-rancher2<filename>sdk/python/pulumi_rancher2/registry.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RegistryArgs', 'Registry']
@pulumi.input_type
class RegistryArgs:
def __init__(__self__, *,
project_id: pulumi.Input[str],
registries: pulumi.Input[Sequence[pulumi.Input['RegistryRegistryArgs']]],
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Registry resource.
:param pulumi.Input[str] project_id: The project id where to assign the registry (string)
:param pulumi.Input[Sequence[pulumi.Input['RegistryRegistryArgs']]] registries: Registries data for registry (list)
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for Registry object (map)
:param pulumi.Input[str] description: A registry description (string)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for Registry object (map)
:param pulumi.Input[str] name: The name of the registry (string)
:param pulumi.Input[str] namespace_id: The namespace id where to assign the namespaced registry (string)
"""
pulumi.set(__self__, "project_id", project_id)
pulumi.set(__self__, "registries", registries)
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if description is not None:
pulumi.set(__self__, "description", description)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace_id is not None:
pulumi.set(__self__, "namespace_id", namespace_id)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Input[str]:
"""
The project id where to assign the registry (string)
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: pulumi.Input[str]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def registries(self) -> pulumi.Input[Sequence[pulumi.Input['RegistryRegistryArgs']]]:
"""
Registries data for registry (list)
"""
return pulumi.get(self, "registries")
@registries.setter
def registries(self, value: pulumi.Input[Sequence[pulumi.Input['RegistryRegistryArgs']]]):
pulumi.set(self, "registries", value)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Annotations for Registry object (map)
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A registry description (string)
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Labels for Registry object (map)
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the registry (string)
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namespaceId")
def namespace_id(self) -> Optional[pulumi.Input[str]]:
"""
The namespace id where to assign the namespaced registry (string)
"""
return pulumi.get(self, "namespace_id")
@namespace_id.setter
def namespace_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace_id", value)
@pulumi.input_type
class _RegistryState:
def __init__(__self__, *,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
registries: Optional[pulumi.Input[Sequence[pulumi.Input['RegistryRegistryArgs']]]] = None):
"""
Input properties used for looking up and filtering Registry resources.
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for Registry object (map)
:param pulumi.Input[str] description: A registry description (string)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for Registry object (map)
:param pulumi.Input[str] name: The name of the registry (string)
:param pulumi.Input[str] namespace_id: The namespace id where to assign the namespaced registry (string)
:param pulumi.Input[str] project_id: The project id where to assign the registry (string)
:param pulumi.Input[Sequence[pulumi.Input['RegistryRegistryArgs']]] registries: Registries data for registry (list)
"""
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if description is not None:
pulumi.set(__self__, "description", description)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace_id is not None:
pulumi.set(__self__, "namespace_id", namespace_id)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if registries is not None:
pulumi.set(__self__, "registries", registries)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Annotations for Registry object (map)
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A registry description (string)
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Labels for Registry object (map)
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the registry (string)
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namespaceId")
def namespace_id(self) -> Optional[pulumi.Input[str]]:
"""
The namespace id where to assign the namespaced registry (string)
"""
return pulumi.get(self, "namespace_id")
@namespace_id.setter
def namespace_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace_id", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
The project id where to assign the registry (string)
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def registries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegistryRegistryArgs']]]]:
"""
Registries data for registry (list)
"""
return pulumi.get(self, "registries")
@registries.setter
def registries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegistryRegistryArgs']]]]):
pulumi.set(self, "registries", value)
class Registry(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
registries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryRegistryArgs']]]]] = None,
__props__=None):
"""
Provides a Rancher v2 Registry resource. This can be used to create docker registries for Rancher v2 environments and retrieve their information.
        Depending on availability, there are 2 types of Rancher v2 docker registries:
- Project registry: Available to all namespaces in the `project_id`
- Namespaced registry: Available to just `namespace_id` in the `project_id`
## Example Usage
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Project Registry
foo = rancher2.Registry("foo",
description="Terraform registry foo",
project_id="<project_id>",
registries=[rancher2.RegistryRegistryArgs(
address="test.io",
password="<PASSWORD>",
username="user",
)])
```
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Namespaced Registry
foo = rancher2.Registry("foo",
description="Terraform registry foo",
namespace_id="<namespace_id>",
project_id="<project_id>",
registries=[rancher2.RegistryRegistryArgs(
address="test.io",
password="<PASSWORD>",
username="user2",
)])
```
## Import
Registries can be imported using the registry ID in the format `<namespace_id>.<project_id>.<registry_id>`
```sh
$ pulumi import rancher2:index/registry:Registry foo <namespace_id>.<project_id>.<registry_id>
```
`<namespace_id>` is optional, just needed for namespaced registry.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for Registry object (map)
:param pulumi.Input[str] description: A registry description (string)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for Registry object (map)
:param pulumi.Input[str] name: The name of the registry (string)
:param pulumi.Input[str] namespace_id: The namespace id where to assign the namespaced registry (string)
:param pulumi.Input[str] project_id: The project id where to assign the registry (string)
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryRegistryArgs']]]] registries: Registries data for registry (list)
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RegistryArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Rancher v2 Registry resource. This can be used to create docker registries for Rancher v2 environments and retrieve their information.
        Depending on availability, there are 2 types of Rancher v2 docker registries:
- Project registry: Available to all namespaces in the `project_id`
- Namespaced registry: Available to just `namespace_id` in the `project_id`
## Example Usage
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Project Registry
foo = rancher2.Registry("foo",
description="Terraform registry foo",
project_id="<project_id>",
registries=[rancher2.RegistryRegistryArgs(
address="test.io",
password="<PASSWORD>",
username="user",
)])
```
```python
import pulumi
import pulumi_rancher2 as rancher2
# Create a new rancher2 Namespaced Registry
foo = rancher2.Registry("foo",
description="Terraform registry foo",
namespace_id="<namespace_id>",
project_id="<project_id>",
registries=[rancher2.RegistryRegistryArgs(
address="test.io",
password="<PASSWORD>",
username="user2",
)])
```
## Import
Registries can be imported using the registry ID in the format `<namespace_id>.<project_id>.<registry_id>`
```sh
$ pulumi import rancher2:index/registry:Registry foo <namespace_id>.<project_id>.<registry_id>
```
`<namespace_id>` is optional, just needed for namespaced registry.
:param str resource_name: The name of the resource.
:param RegistryArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RegistryArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
registries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryRegistryArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RegistryArgs.__new__(RegistryArgs)
__props__.__dict__["annotations"] = annotations
__props__.__dict__["description"] = description
__props__.__dict__["labels"] = labels
__props__.__dict__["name"] = name
__props__.__dict__["namespace_id"] = namespace_id
if project_id is None and not opts.urn:
raise TypeError("Missing required property 'project_id'")
__props__.__dict__["project_id"] = project_id
if registries is None and not opts.urn:
raise TypeError("Missing required property 'registries'")
__props__.__dict__["registries"] = registries
super(Registry, __self__).__init__(
'rancher2:index/registry:Registry',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,
description: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
registries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryRegistryArgs']]]]] = None) -> 'Registry':
"""
Get an existing Registry resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Mapping[str, Any]] annotations: Annotations for Registry object (map)
:param pulumi.Input[str] description: A registry description (string)
:param pulumi.Input[Mapping[str, Any]] labels: Labels for Registry object (map)
:param pulumi.Input[str] name: The name of the registry (string)
:param pulumi.Input[str] namespace_id: The namespace id where to assign the namespaced registry (string)
:param pulumi.Input[str] project_id: The project id where to assign the registry (string)
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegistryRegistryArgs']]]] registries: Registries data for registry (list)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RegistryState.__new__(_RegistryState)
__props__.__dict__["annotations"] = annotations
__props__.__dict__["description"] = description
__props__.__dict__["labels"] = labels
__props__.__dict__["name"] = name
__props__.__dict__["namespace_id"] = namespace_id
__props__.__dict__["project_id"] = project_id
__props__.__dict__["registries"] = registries
return Registry(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def annotations(self) -> pulumi.Output[Mapping[str, Any]]:
"""
Annotations for Registry object (map)
"""
return pulumi.get(self, "annotations")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A registry description (string)
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Mapping[str, Any]]:
"""
Labels for Registry object (map)
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the registry (string)
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namespaceId")
def namespace_id(self) -> pulumi.Output[Optional[str]]:
"""
The namespace id where to assign the namespaced registry (string)
"""
return pulumi.get(self, "namespace_id")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
"""
The project id where to assign the registry (string)
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter
def registries(self) -> pulumi.Output[Sequence['outputs.RegistryRegistry']]:
"""
Registries data for registry (list)
"""
return pulumi.get(self, "registries")
|
StarcoderdataPython
|