blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
38717c7c5e31cde86fb52b54eaeb64ce63792cf6 | 599ddd95bd9383752dbcad51642791541f7013db | /autoencoder_model.py | 6e2dc4bda0794156a75cdaf714b35ce18f0de856 | [
"MIT"
] | permissive | neemperor/UnDeepVO | e04e36cc28b63f424808ea5ced8ca7e024a4219f | 9159febdf8be7a317fd6f60cc29c5b1d41e6d1ec | refs/heads/master | 2022-06-17T19:43:36.730430 | 2020-04-26T16:17:49 | 2020-04-26T16:17:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,954 | py | from keras import Model
from keras.layers import Conv2D, Conv2DTranspose, concatenate
from keras.optimizers import Adam
class AutoEncoderModel(object):
    """Encoder-decoder image auto-encoder with skip connections (Keras).

    Seven strided conv blocks encode the left image; a mirrored decoder
    with encoder skip connections reconstructs it. The compiled model
    maps the left input to its own reconstruction (MAE loss).
    """
    def __init__(self, left_input, right_input, lr=1e-4, rows=128, cols=512):
        self.rows = rows
        self.cols = cols
        self.left = left_input
        self.right = right_input  # NOTE(review): stored but never used below
        self.left_est = None
        self.right_est = None
        self.output = None
        self.model = None
        self.lr = lr  # NOTE(review): unused -- build_model() hardcodes 'adadelta'
        self.build_architecture()
        self.build_outputs()
        self.build_model()
    @staticmethod
    def conv(input, channels, kernel_size, strides, activation='elu'):
        # Same-padded 2D convolution applied functionally to `input`.
        return Conv2D(channels, kernel_size=kernel_size, strides=strides, padding='same', activation=activation)(input)
    @staticmethod
    def deconv(input, channels, kernel_size, scale):
        # Transposed convolution used to upsample spatially by `scale`.
        return Conv2DTranspose(channels, kernel_size=kernel_size, strides=scale, padding='same')(input)
    def conv_block(self, input, channels, kernel_size):
        # Two convolutions; the second halves the resolution (stride 2).
        conv1 = self.conv(input, channels, kernel_size, 1)
        conv2 = self.conv(conv1, channels, kernel_size, 2)
        return conv2
    def deconv_block(self, input, channels, kernel_size, skip):
        # Upsample x2, optionally concatenate an encoder skip tensor on the
        # channel axis (axis 3), then smooth with a stride-1 convolution.
        deconv1 = self.deconv(input, channels, kernel_size, 2)
        if skip is not None:
            concat1 = concatenate([deconv1, skip], 3)
        else:
            concat1 = deconv1
        iconv1 = self.conv(concat1, channels, kernel_size, 1)
        return iconv1
    def get_output(self, deconv):
        # 3-channel sigmoid head: reconstructed image with values in [0, 1].
        return self.conv(deconv, 3, 3, 1, 'sigmoid')
    def build_architecture(self):
        # encoder
        conv1 = self.conv_block(self.left, 32, 7)
        conv2 = self.conv_block(conv1, 64, 5)
        conv3 = self.conv_block(conv2, 128, 3)
        conv4 = self.conv_block(conv3, 256, 3)
        conv5 = self.conv_block(conv4, 512, 3)
        conv6 = self.conv_block(conv5, 512, 3)
        conv7 = self.conv_block(conv6, 512, 3)
        # skips (one per encoder stage, consumed by the decoder below)
        skip1 = conv1
        skip2 = conv2
        skip3 = conv3
        skip4 = conv4
        skip5 = conv5
        skip6 = conv6
        # decoder: mirror of the encoder; the last block has no skip input
        deconv7 = self.deconv_block(conv7, 512, 3, skip6)
        deconv6 = self.deconv_block(deconv7, 512, 3, skip5)
        deconv5 = self.deconv_block(deconv6, 256, 3, skip4)
        deconv4 = self.deconv_block(deconv5, 128, 3, skip3)
        deconv3 = self.deconv_block(deconv4, 64, 3, skip2)
        deconv2 = self.deconv_block(deconv3, 32, 3, skip1)
        deconv1 = self.deconv_block(deconv2, 16, 3, None)
        self.output = self.get_output(deconv1)
    def build_outputs(self):
        # Only the left reconstruction is produced; the right path is stubbed.
        self.left_est = self.output
        # self.right_est = expand_dims(self.output, 1, 'right_estimate')
    def build_model(self):
        # Single-input/single-output reconstruction model.
        self.model = Model(inputs=[self.left], outputs=[self.left_est])
        self.model.compile(loss=['mae'],
                           optimizer='adadelta',
                           metrics=['mse'])
| [
"[email protected]"
] | |
2695033b85102d7d598e889b156c7ad37c13df3c | 8d371fc0a83f05a7a34927f27ef634b10069b081 | /trail/trail/spiders/trail_spider.py | 54ff8d2d476326bb0809364afeb07991f2bf971c | [] | no_license | tmartin357/SlackTrail | 4ef09f6b7c1ce7a1fc6f8b9afa06bece54e5b5c3 | cf00e010eca2cc4ceee3e9956f76cd7ce7bfd1c5 | refs/heads/master | 2020-04-11T16:31:43.968076 | 2019-01-16T23:17:47 | 2019-01-16T23:17:47 | 161,927,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,719 | py | import scrapy
import re
class QuotesSpider(scrapy.Spider):
    """Scrape Trailhead profile pages into name/badges/points/rank records.

    NOTE(review): the class name is a tutorial leftover; the spider's
    public name is "trails" and it crawls trailhead.com profiles.
    """
    name = "trails"
    def start_requests(self):
        # Fixed list of profile URLs to crawl.
        urls = [
            'https://trailhead.com/me/jameldjackson',
            'https://trailhead.com/me/tmartin357',
            'https://trailhead.com/me/BabatundeAborisade',
            # 'https://trailhead.com/me/adhunter',
            'https://trailhead.com/me/cyndiewandia',
            'https://trailhead.com/me/jaroper86'
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)
    def parse(self, response):
        # The full name is embedded in a JSON-like blob inside the page markup.
        name = response.xpath('//html/body/div[1]/main/div/div/div/div[1]/section/div[2]/div[1]').extract_first()
        print(name)
        # NOTE(review): re.match returns None when the pattern is absent,
        # which would make group() below raise AttributeError.
        matchObj = re.match( '.*full_name":"(.*)","work.*', name)
        name = matchObj.group(1)
        # myurl = '<a href="'+response.request.url+'">Link</a>'
        myurl = response.request.url
        # Badge/point counts are rendered with thousands separators; strip commas.
        mybadges = int(response.xpath('/html/body/div[1]/main/div/div/div/div[1]/section/div[2]/div[2]/div/div[1]/div[2]/text()').extract_first().replace(',', ''))
        mypoints = int(response.xpath('/html/body/div[1]/main/div/div/div/div[1]/section/div[2]/div[2]/div/div[2]/div[2]/text()').extract_first().replace(',', ''))
        # Rank ladder: later checks overwrite earlier ones, so the highest
        # threshold met (points AND badges) wins.
        myrank = "Scout"
        if mypoints >= 200 and mybadges >= 1:
            myrank = "Hiker"
        if mypoints >= 3000 and mybadges >= 5:
            myrank = "Explorer"
        if mypoints >= 9000 and mybadges >= 10:
            myrank = "Adventurer"
        if mypoints >= 18000 and mybadges >= 25:
            myrank = "Mountaineer"
        if mypoints >= 35000 and mybadges >= 50:
            myrank = "Expeditioner"
        if mypoints >= 50000 and mybadges >= 100:
            myrank = "Ranger"
        yield {
            'name': name,
            'badges': mybadges,
            'points': mypoints,
            'url': str(myurl),
            'rank': myrank
        }
| [
"[email protected]"
] | |
3de69146548769e595fd3ee6abeb17031185bfd2 | 560da392f8a2d0e3e75314d64d5701c1ee41378a | /_old/kadai1_3.py | e2eb42f9708511cceac0452d88d51ade3381d23b | [] | no_license | YenR/python_intro | 09de9532f7ddacd58aa9a8c143e320c28860cb16 | 73c47c7e2fb80b7217083bfeb9f5f51b7bf180b8 | refs/heads/master | 2020-05-29T14:59:07.966382 | 2019-07-15T15:52:47 | 2019-07-15T15:52:47 | 189,208,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 19 13:26:48 2019
@author: u593120c
課題1-3:下記のようにニュートン法(※)で探索するように変更し、調べた回数を変更前と比較せよ。
"""
a= float(input("Please input a positive number: "))  # value to take the square root of
x= 1.0  # initial guess for Newton's method
epsilon = 0.01  # acceptable error |x**2 - a|
numGuesses = 1  # iteration counter (the initial guess counts as 1)
# f(x) = x²-a=0 implementation / 実装
def f(x, a):
    """Residual of the square-root equation: f(x) = x**2 - a."""
    return x * x - a
# f'(x) = 2x=0 implementation / 実装
def fdev(x):
    """First derivative of f with respect to x: f'(x) = 2x."""
    return x + x
# Newton's method: repeat x <- x - f(x)/f'(x) until |x**2 - a| < epsilon.
while abs(x**2-a)>=epsilon:
    xnew = x -f(x,a)/fdev(x)
    if(x==xnew):
        # Fixed point: no further progress possible at float precision.
        break
    x=xnew
    #print("x: ", x)
    numGuesses += 1
print("numGuesses = ", numGuesses) # number of iterations (調べた回数)
if abs(x**2 -a) >= epsilon:
    # Reached only via the fixed-point break above without converging.
    print("Failed on square root of ", a)
    print("Closest guess: ", x, " pow: ", x**2, " delta: ", abs(x**2-a))
else:
    print(x," is close to square root of ", a)
| [
"[email protected]"
] | |
d5c3a71453efe07f06afe3e7fc2a1100ee47de6b | 324ce00226bc082eeec26fae6a71d95849f24ee9 | /cpu.py | dd8c4dc0f29fba982ef03001b1ddbc29bc74fdb9 | [] | no_license | iper4497/python | 6b1944f08f2e41b57b74c1311dc1e4ba41dc5715 | 99d43a7a41b49b67779bacef20673d8a599f64d1 | refs/heads/master | 2021-01-20T19:29:55.004820 | 2017-04-25T16:37:45 | 2017-04-25T16:37:45 | 60,462,166 | 0 | 0 | null | 2016-06-08T16:59:16 | 2016-06-05T13:45:25 | Python | UTF-8 | Python | false | false | 135 | py | p = 1
# CPU/memory stress script: endlessly builds ever larger throw-away lists.
# (`p = 1` is initialised on the line above this block.)
f = 1
while True:
    p = p + p  # double the workload size each pass
    a = [ ]
    for b in range(p):
        a.append(b)
    c = b  # last index produced, i.e. p - 1 (p >= 2 here, so b is defined)
    print(c)
    if c > 10000000:
        # Past ten million elements, restart the doubling from a small value.
        p = f + 1
        f = p
| [
"[email protected]"
] | |
4800616aabba38aa6b9a82323c33a2963d14ba50 | 2da4dd290ae4b2f5a9106c6ce7e584d485b1995e | /pythonFiles/dnnTraining/FFN_gridSearch/thinLincGrid/FFN_Model_gridSearch.py | d9934957450293ed3dcbe540205ff138811fc0da | [] | no_license | zhaoforever/dnn-SpeechEnhancement-1 | 81eb85c9711eeae5f720c42cd79ba70aa7c59782 | 4125e0a5a63544889de6cfefc0558008798056ef | refs/heads/master | 2020-03-27T23:14:44.947522 | 2018-09-03T08:48:36 | 2018-09-03T08:48:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | import tensorflow as tf
slim = tf.contrib.slim
import numpy as np
def defineFFN(next_feat_pl,NUM_CLASSES,slimStack,conigN):
    """Build a leaky-ReLU feed-forward network with TF-Slim; return its output.

    next_feat_pl: input tensor/placeholder.
    NUM_CLASSES:  width of the final fully-connected layer.
    slimStack:    layer widths passed to slim.stack (one FC layer per entry).
    conigN:       configuration index used to namespace layer scopes
                  (presumably a typo for "configN").
    """
    # NOTE(review): stddev sqrt(1/257) looks matched to a 257-dim input
    # (e.g. 512-point FFT magnitude bins) -- confirm against the feature size.
    with slim.arg_scope([slim.fully_connected],activation_fn=tf.nn.leaky_relu, weights_initializer=tf.truncated_normal_initializer(stddev=np.sqrt(1/257)),reuse=tf.AUTO_REUSE),tf.variable_scope('FFN'):
        x = slim.stack(next_feat_pl, slim.fully_connected,slimStack,scope=('fc'+str(conigN)))
        preds = slim.fully_connected(x, num_outputs=NUM_CLASSES,scope=('fcOut'+str(conigN)))
    # Identity wrapper gives the output a stable, findable tensor name.
    return tf.identity(preds,name='preds2')
| [
"[email protected]"
] | |
ac2f02367f4f4a0823ff315413f8a78ac99b2fdd | 442d79f6165fea1bb8a76ed5df537ce2eefb8578 | /elastic/migrations/0001_initial.py | 144a8ec978f9468223fc21b3192d04e2fdebf88c | [] | no_license | Shivamgera/OMDB_Search | aecf20aacfd6a52313b8b9e5fab153e1920e98d2 | 7926083cd04a6a9fbe4200b66df8d31eb803af17 | refs/heads/master | 2020-12-09T20:42:47.807839 | 2020-10-31T19:48:32 | 2020-10-31T19:48:32 | 233,410,830 | 0 | 0 | null | 2020-10-31T19:48:33 | 2020-01-12T15:07:07 | Python | UTF-8 | Python | false | false | 1,235 | py | # Generated by Django 2.1.5 on 2020-01-14 11:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the `elastic` app.
    # Avoid hand-editing migrations that have already been applied.
    initial = True
    dependencies = [
    ]
    operations = [
        # Genres: one row per (movie, genre) pair; linked to Movies below.
        migrations.CreateModel(
            name='Genres',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('genre', models.CharField(max_length=60)),
            ],
        ),
        migrations.CreateModel(
            name='Movies',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now=True)),
                ('title', models.CharField(max_length=255)),
                ('year', models.IntegerField()),
                ('rating', models.FloatField()),
                ('genre', models.TextField(default='No Genre')),
            ],
        ),
        # Foreign key from Genres to its movie; rows cascade on movie delete.
        migrations.AddField(
            model_name='genres',
            name='movie_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='elastic.Movies'),
        ),
    ]
| [
"[email protected]"
] | |
1c4eae8c8b14c9193edd4274cdd067c9ccd8b5b4 | 995edc8943a5bbd81fa0b714b0f65deef83ae563 | /groups/views.py | 5062e5b06a291617d43b6e7a58ace77d642ee658 | [] | no_license | shrey920/socio | a7b110383cf8b2940ce26ba71d61eee78c47fda9 | 345dcdb3ceb5f27ea6ce0b3ff23b4292d5015b5d | refs/heads/master | 2022-12-09T03:27:16.965187 | 2019-12-18T14:08:39 | 2019-12-18T14:08:39 | 124,672,221 | 1 | 1 | null | 2022-12-08T01:05:05 | 2018-03-10T16:09:05 | CSS | UTF-8 | Python | false | false | 6,530 | py | from django.views import generic
from django.shortcuts import render,redirect
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.views.generic.edit import CreateView,UpdateView,DeleteView
from django.urls import reverse_lazy
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import authentication, permissions
from .models import *
from profiles.models import *
from chat.models import *
from posts.models import *
# Create your views here.
User = get_user_model()
class CreateGroupView(LoginRequiredMixin, generic.CreateView):
    """Create a Group; the creator becomes its first admin and member,
    and a ChatRoom is created alongside the group."""
    login_url = '/login'
    model = Group
    fields = ['name', 'info', 'picture']
    def form_valid(self, form):
        user=self.request.user
        # NOTE(review): CreateView calls form_valid only after validation,
        # so the inner is_valid() check is redundant but harmless.
        if form.is_valid():
            form.save()
            form.instance.admin.add(user)
            form.instance.members.add(user)
            # Every group gets a chat room keyed by the group's name.
            chat = ChatRoom()
            chat.eid = form.instance.name
            chat.group = form.instance
            chat.save()
            chat.members.add(user)
        return redirect('groups:groupDetail',form.instance.name)
class UpdateGroupView(LoginRequiredMixin, generic.UpdateView):
    """Edit a group's name/info/picture; restricted to group admins."""
    login_url = '/login'
    model = Group
    fields = ['name', 'info', 'picture']
    def dispatch(self, request, *args, **kwargs):
        # Admin gate: non-admins are bounced home before any handler runs.
        user=self.request.user
        group=Group.objects.get(pk=self.kwargs['pk'])
        if user not in group.admin.all():
            return redirect('home')
        else:
            # Re-implements the base View.dispatch() method routing.
            if request.method.lower() in self.http_method_names:
                handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
            else:
                handler = self.http_method_not_allowed
            return handler(request, *args, **kwargs)
    def form_valid(self, form):
        if form.is_valid():
            form.save()
        return redirect('groups:groupDetail',form.instance.name)
class GroupView(LoginRequiredMixin, generic.DetailView):
    """Group detail page: the group, its posts, member count, and the
    requesting user's member/admin flags."""
    login_url = '/login'
    template_name='groups/group_view.html'
    context_object_name = 'context'
    def get_object(self, queryset=Profile.objects):
        # NOTE(review): the `queryset` default is never used below; the
        # group is looked up by name from the URL instead.
        group=Group.objects.get(name=self.kwargs['name'])
        member=False
        admin=False
        all_posts=group.post_set.all()
        if self.request.user in group.admin.all():
            admin = True
        if self.request.user in group.members.all():
            member = True
        members=group.members.count()
        return {'group': group,'admin': admin, 'member': member, 'all_posts': all_posts,'members':members }
@login_required(login_url='/login')
def addMember(request,pk):
    """Render the member-management page for a group (admins only)."""
    group = Group.objects.get(pk=pk)
    if request.user not in group.admin.all():
        return redirect('home')
    return render(request,'groups/members.html',{'group':group})
class AddMemberView(LoginRequiredMixin, APIView):
    """Toggle a user's group membership via GET (?friend=<username>).

    Removing a member also removes them from the group's chat room and
    admin set; adding also joins them to the chat room.
    """
    login_url = '/login'
    authentication_classes = (authentication.SessionAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)
    def get(self, request, pk, format=None):
        group=Group.objects.get(pk=pk)
        friend=User.objects.get(username=request.GET['friend'])
        if friend in group.members.all():
            group.members.remove(friend)
            group.chatroom.members.remove(friend)
            group.admin.remove(friend)
            status='removed'
        else:
            group.members.add(friend)
            group.chatroom.members.add(friend)
            status='added'
        data = {
            'status':status
        }
        return Response(data)
class AddAdminView(LoginRequiredMixin, APIView):
    """Toggle a user's admin status in a group via GET (?friend=<username>)."""
    login_url = '/login'
    authentication_classes = (authentication.SessionAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)
    def get(self, request, pk, format=None):
        group=Group.objects.get(pk=pk)
        friend=User.objects.get(username=request.GET['friend'])
        if friend in group.admin.all():
            group.admin.remove(friend)
            status='removed'
        else:
            group.admin.add(friend)
            status='added'
        data = {
            'status':status
        }
        return Response(data)
class GroupListView(LoginRequiredMixin, generic.ListView):
    """List the groups the current user belongs to."""
    login_url = '/login'
    template_name = 'groups/group_list.html'
    context_object_name = 'all_groups'
    def get_queryset(self):
        # 'group_member' is the reverse accessor from User to Group.members.
        q = self.request.user.group_member.all()
        return q
class DeleteGroupView(LoginRequiredMixin,generic.DeleteView):
    """Delete a group (admins only), then return to the group list."""
    login_url = '/login'
    model = Group
    success_url = reverse_lazy('groups:groupList')
    def delete(self, request, *args, **kwargs):
        # Admin gate before performing the delete.
        group = Group.objects.get(pk=kwargs['pk'])
        if self.request.user not in group.admin.all():
            return redirect('home')
        self.object = self.get_object()
        self.object.delete()
        return HttpResponseRedirect(self.get_success_url())
@login_required(login_url='/login')
def leaveGroup(request,pk):
    """Remove the current user from a group (and its chat room).

    Deletes the group when it becomes empty; if the leaver was an admin
    and no admins remain, the first remaining member is promoted.
    """
    group=Group.objects.get(pk=pk)
    if request.user not in group.members.all():
        return redirect('home')
    group.members.remove(request.user)
    group.chatroom.members.remove(request.user)
    if group.members.count() == 0:
        group.delete()
    elif request.user in group.admin.all():
        group.admin.remove(request.user)
        if group.admin.count() == 0:
            # Promote an arbitrary remaining member so the group keeps an admin.
            group.admin.add(group.members.all()[0])
    return redirect('home')
class CreatePost(LoginRequiredMixin,CreateView):
    """Create a Post inside a group; only group members may post."""
    login_url = '/login'
    model=Post
    fields = ['title','text','image','file']
    def dispatch(self, request, *args, **kwargs):
        # Membership gate before normal HTTP method routing.
        if request.user not in Group.objects.get(pk=self.kwargs['pk']).members.all():
            return redirect('home')
        if request.method.lower() in self.http_method_names:
            handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
        else:
            handler = self.http_method_not_allowed
        return handler(request, *args, **kwargs)
    def form_valid(self, form):
        # Stamp the author and target group before saving.
        form.instance.owner = self.request.user
        form.instance.group = Group.objects.get(pk=self.kwargs['pk'])
        if form.is_valid():
            form.save()
        return redirect('home')
| [
"[email protected]"
] | |
06adaf5a2b333befc76eb410088c0c1e1914ba7f | 67a239a63fc1f155ad10250e5da7f92f0d2602f8 | /airflow/providers/google/common/links/storage.py | 7934d95d334195af97147ea0620b3fdd77fea3f6 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | dskoda1/airflow | e62519a28fa8b0f84970b4cf2f52141a16080815 | c0e9daa3632dc0ede695827d1ebdbd091401e94d | refs/heads/main | 2022-11-21T13:02:36.150877 | 2022-06-02T05:07:45 | 2022-06-02T05:07:45 | 279,863,324 | 0 | 0 | Apache-2.0 | 2020-07-15T12:33:03 | 2020-07-15T12:33:02 | null | UTF-8 | Python | false | false | 2,229 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a link for GCS Storage assets."""
from typing import TYPE_CHECKING, Optional
from airflow.models import BaseOperator
from airflow.providers.google.cloud.links.base import BaseGoogleLink
# Google Cloud console URL templates; {uri} and {project_id} are filled in
# from the values persisted via XCom by the link classes below.
BASE_LINK = "https://console.cloud.google.com"
GCS_STORAGE_LINK = BASE_LINK + "/storage/browser/{uri};tab=objects?project={project_id}"
GCS_FILE_DETAILS_LINK = BASE_LINK + "/storage/browser/_details/{uri};tab=live_object?project={project_id}"
if TYPE_CHECKING:
    from airflow.utils.context import Context
class StorageLink(BaseGoogleLink):
    """Helper class for constructing GCS Storage link"""
    name = "GCS Storage"
    key = "storage_conf"
    format_str = GCS_STORAGE_LINK
    @staticmethod
    def persist(context: "Context", task_instance, uri: str):
        # Store the link parameters in XCom so the UI can render the console
        # URL later. NOTE(review): unlike FileDetailsLink.persist, this reads
        # project_id off the operator -- assumes the operator defines it.
        task_instance.xcom_push(
            context=context,
            key=StorageLink.key,
            value={"uri": uri, "project_id": task_instance.project_id},
        )
class FileDetailsLink(BaseGoogleLink):
    """Helper class for constructing GCS file details link"""
    name = "GCS File Details"
    key = "file_details"
    format_str = GCS_FILE_DETAILS_LINK
    @staticmethod
    def persist(context: "Context", task_instance: BaseOperator, uri: str, project_id: Optional[str]):
        # Store the link parameters in XCom; project_id is passed explicitly
        # here (may be None), unlike StorageLink.persist.
        task_instance.xcom_push(
            context=context,
            key=FileDetailsLink.key,
            value={"uri": uri, "project_id": project_id},
        )
| [
"[email protected]"
] | |
6634504430c78baa0380aac6c2379999128c94cb | c5e4de00279b77e9d50bcd2a38dd5bc968921ac9 | /django2/learning/main/apps/test_app/models.py | 3fce211c5fec0cccd368c02a077ab5a959b2dbc4 | [] | no_license | jennuinecode/learning_django | d7cbe25af6972c3d80803c5fdc9a971eff8e3600 | 2e1e7f7e61a52d92d2b8fc93996f5a5052f4bee8 | refs/heads/master | 2020-04-06T04:01:58.479363 | 2017-03-27T17:50:48 | 2017-03-27T17:50:48 | 83,070,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | from __future__ import unicode_literals
from django.db import models
class Blog(models.Model):
    """A blog post with title, body and creation/update timestamps."""
    title = models.CharField(max_length=100)
    blog = models.TextField(max_length=1000)
    created_at = models.DateField(auto_now_add=True)  # set once, at creation
    # Bug fix: was auto_now_add=True, which froze the value at creation time;
    # auto_now=True refreshes the date on every save(), as "updated_at" implies.
    updated_at = models.DateField(auto_now=True)
class Comment(models.Model):
    """A comment attached to a Blog post."""
    blog_id = models.ForeignKey(Blog)  # Django < 2.0 style: implicit CASCADE
    comment = models.CharField(max_length=1000)
    created_at = models.DateField(auto_now_add=True)  # set once, at creation
    # Bug fix: was auto_now_add=True; auto_now=True refreshes on every save().
    updated_at = models.DateField(auto_now=True)
| [
"[email protected]"
] | |
c66d78df9ffdb2f1bd3151ad8e532d31a5a809b8 | 9ba1f46453cdf4568c93e95caada451bbb4399fc | /portfolioAPI/portfolioAPI/portfolioAPI/wsgi.py | 82f79bc8618295e9b4c45c68c5b81412fdfbe24c | [] | no_license | laracork/Carta-PortfolioAPI | d8759edbb15c73255742d589ab3207a6f48dc9e7 | 45e6d873da8e8b679d909a6809985ebe541ce2c2 | refs/heads/master | 2022-12-24T19:36:09.305907 | 2018-12-04T07:53:06 | 2018-12-04T07:53:06 | 160,261,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for portfolioAPI project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before the application is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portfolioAPI.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"[email protected]"
] | |
ff3215198197ffc05d5de16d675515d2f2296d0a | 87fe4181a085dccc2be4b0116181dd834db8f388 | /nomadgram/users/migrations/0003_auto_20171102_2125.py | d5f765ee24b0112bc6beff5f435cc107a6303f19 | [
"MIT"
] | permissive | hebaragiseed/nomadgram | 9683dae7c1b55ba3e55c1f1c5e1557ac4c9173af | 2351b70ba896d9f7550deb84ae7a1f7ae9c8b3d5 | refs/heads/master | 2021-05-07T22:10:15.458132 | 2017-12-13T18:46:16 | 2017-12-13T18:46:16 | 109,048,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-11-02 12:25
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds self-referential follower/following M2M fields to
    # the custom user model. Avoid hand-editing applied migrations.
    dependencies = [
        ('users', '0002_auto_20171102_1655'),
    ]
    operations = [
        # A related_name ending in '+' suppresses the reverse accessor.
        migrations.AddField(
            model_name='user',
            name='followers',
            field=models.ManyToManyField(related_name='_user_followers_+', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='user',
            name='following',
            field=models.ManyToManyField(related_name='_user_following_+', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"[email protected]"
] | |
cfb32d3d8793db242c4673f5d5e2c5d2aad685db | 87943eaf435f35156e8dfa95d47e94de73705556 | /apps/common/common.py | 108b9ac47ce9659bae8262d4e5674c9e1cbd40ab | [] | no_license | youyingxiang/yxx_admin | 2c145f1ccdd26244008b43bfbc0ffc9426953ca1 | 7c546284eec222aa3a7fd4b50e15a9d338858e44 | refs/heads/master | 2023-03-11T09:46:20.759913 | 2020-05-16T17:09:07 | 2020-05-16T17:09:07 | 134,954,103 | 75 | 23 | null | 2022-12-08T02:08:01 | 2018-05-26T11:04:49 | Python | UTF-8 | Python | false | false | 161 | py | # 公用函数文件
from wtforms import Form
class FormBase(Form):
    # Shared base class for the app's WTForms forms.
    def get_err_one(self):
        """Return one validation error message (arbitrary field, first message)."""
        # NOTE(review): raises KeyError if there are no errors, and assumes
        # self.errors returns a fresh dict each access (so popitem() does not
        # corrupt form state) -- verify for the WTForms version in use.
        err = self.errors.popitem()[1][0]
        return err
| [
"[email protected]"
] | |
c84dff9f1eb441fa427605385e28ccd176b193f1 | 6f9913971b07093d85d7395d438e30992d7b7456 | /ex3.py | 8f875b2554be5b6c32f8854ecfc23fabaffb4330 | [] | no_license | Lhotse867745418/Python3_Hardway | 1775d3b77073b25d50fec041917e85b8ae902c8e | b647cdd5bed5236096eedf9c752fda4e261a4da9 | refs/heads/master | 2021-09-10T11:13:43.879478 | 2018-03-25T11:01:53 | 2018-03-25T11:01:53 | 125,729,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | #-------------------------------------------------------------------------------
# Name: numbers and math
# Purpose: learn math
#
# Author: lhotse
#
# Created: 17/03/2018
# Copyright: (c) lhotse 2018
# Licence:
#-------------------------------------------------------------------------------
# Exercise 3 ("Learn Python the Hard Way"): arithmetic and comparison output.
print("i will now count my chickens")
print("hens", 25 + 30/6)
# difference between floating and integer
#print("roosters", 100 - 25*3%4)
print("roosters", 100 - 25*3.0%4)
print("now i will count the eggs:")
print(3 + 2 + 1 - 5 + 4%2 - 1/4 + 6)
print("is it true that 3 + 2 < 5 - 7 ?")
print(3 + 2 < 5 - 7)
print("what is 3 + 2?", 3 + 2)
print("what is 5 - 7?", 5 - 7)
print("oh!,that's why it's false.")
print("how about some more.")
print("is it greater or equal?", 5 >= -2)
print("is it less or equal?", 5 <= -2)
def main():
    # Placeholder entry point (IDE template); the prints above run on import.
    pass
if __name__ == '__main__':
    main()
"[email protected]"
] | |
a28d8b93e2c943b416dc0e882ce5ceeaff0889f8 | 551ef0567aca428a535775d3949f5d9670c0d29c | /abc/212/c/main.py | 9351e78f01dac0c8505cf070b9bc58365ee2fbc7 | [] | no_license | komo-fr/AtCoder | 7451a9402466ce8d487d0c521128732061c647df | c916889294cb12f21e74254de43b3e17e1b354bc | refs/heads/master | 2023-07-22T07:05:52.955188 | 2023-03-01T14:22:16 | 2023-03-01T14:22:16 | 213,109,943 | 0 | 0 | null | 2023-07-06T22:01:28 | 2019-10-06T04:44:49 | Python | UTF-8 | Python | false | false | 529 | py | #!/usr/bin/env python3
# AtCoder ABC212 C: minimum |a - b| over all pairs (a from A, b from B).
N, M = list(map(int, input().split()))
a_list = list(map(int, input().split()))
b_list = list(map(int, input().split()))
# Tag every value with its source list and sweep the merged sorted sequence;
# the closest cross-list pair is always adjacent in sorted order.
ab_list = []
for a in a_list:
    ab_list.append((a, "a"))
for b in b_list:
    ab_list.append((b, "b"))
ab_list = sorted(ab_list)
a = None
b = None
min_y = float("inf")
for x in ab_list:
    # a/b track the most recent value seen from each list.
    if x[1] == "a":
        a = x[0]
    if x[1] == "b":
        b = x[0]
    if a is not None and b is not None:
        y = abs(a - b)
        min_y = min([y, min_y])
ans = min_y
print(ans)
| [
"[email protected]"
] | |
7189a6757c3e6f1b331a3105fbf5afa2d5263dd5 | 40c40be9916c1eba72588bdb301ce9ef1ad9de7c | /0x06-python-classes/4-square.py | 6f762831f9162e526ec1030581217b8cb8172ee6 | [] | no_license | Tr3v1n4t0r/holbertonschool-higher_level_programming | 943f4677553b6d59ae07b8eec328ea5b5a4f605e | f07d00adb4cf6b63072a5a095a51ecc747adb594 | refs/heads/master | 2020-09-29T02:59:28.923041 | 2020-05-15T05:52:06 | 2020-05-15T05:52:06 | 226,933,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | #!/usr/bin/python3
class Square:
    """A square defined by the length of its side (a non-negative int)."""

    def __init__(self, size=0):
        # Route through the property setter so validation runs here too.
        self.size = size

    @property
    def size(self):
        """int: length of one side (always >= 0)."""
        return self.__size

    @size.setter
    def size(self, value):
        # isinstance is the idiomatic type check (accepts int subclasses);
        # bool is excluded explicitly, preserving the original rejection of
        # True/False that `type(value) != int` provided.
        if not isinstance(value, int) or isinstance(value, bool):
            raise TypeError('size must be an integer')
        if value < 0:
            raise ValueError('size must be >= 0')
        self.__size = value

    def area(self):
        """Return the area of the square (size squared)."""
        return self.__size ** 2
| [
"[email protected]"
] | |
32a50f5f84c0d94bfd460e73b46f74acd671f142 | fa740eac801924cf053a71836b310ff3b4479c4d | /game/pong_simple_ai.py | 3308831c3d4d903fb8e99fef5c5e22e8d44486c0 | [] | no_license | JoanneLin168/pongML | f701446a24c9a7c673b7491bc9c1590a8f3d5b76 | b63dbeca269d1cfb3844f08afd5a272c7b14e8cc | refs/heads/master | 2023-02-07T14:07:24.449328 | 2021-01-03T18:42:31 | 2021-01-03T18:42:31 | 326,065,707 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,533 | py | # Simple pygame program
# Import and initialize the pygame library
import pygame
import paddle
import ball
### SETUP ###
pygame.init()
def show_score(screen, score):
    # Draw the current score in the top-left corner (module-level FONT).
    score_overlay = FONT.render('Score: '+str(score), True, FONT_COLOUR)
    screen.blit(score_overlay, (10, 5))
# FPS Counter
def show_fps(screen, clock):
    # Draw the measured frame rate in the top-right corner.
    fps_overlay = FONT.render('FPS: '+str(int(clock.get_fps())), True, FONT_COLOUR)
    screen.blit(fps_overlay, (screen_width - 80, 5))
clock = pygame.time.Clock()  # regulates the frame rate
FPS = 60
FONT = pygame.font.SysFont("Arial", 20)
FONT_COLOUR = pygame.Color("white")
screen_width = 640
screen_height = 640
# Set up the drawing window
screen = pygame.display.set_mode([screen_width, screen_height])
# `ball` and `paddle` are local modules; rebind the names to instances.
ball = ball.Ball()
ball.x = screen_width/2
ball.y = ball.radius + 20  # start near the top of the screen
paddle = paddle.Paddle()
paddle.x = screen_width/2 - paddle.width/2  # centred horizontally
paddle.y = screen_height - paddle.height - 20  # near the bottom edge
# Colours (RGB)
ball_colour = (254,74,73)
paddle_colour = (254,215,102)
bg_colour = (69, 69, 69)
### GAME ###
# Run until the user asks to quit
running = True
score = 0
hit_buffer = 0  # frames remaining before another paddle hit may score
while running:
    # Did the user click the window close button?
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    # Fill the background (dark grey)
    screen.fill(bg_colour)
    # Draw text overlays
    show_fps(screen, clock)
    show_score(screen, score)
    # Draw the ball and paddle.
    # Parameters: screen, colour, position, radius
    pygame.draw.circle(screen, ball_colour, (ball.x, ball.y), ball.radius)
    pygame.draw.rect(screen, paddle_colour, (paddle.x, paddle.y, paddle.width, paddle.height))
    keys = pygame.key.get_pressed()  # NOTE(review): unused -- the paddle is AI-driven below
    # Simple AI: steer the paddle centre towards the ball, clamped on-screen.
    if (paddle.x+(paddle.width/2) > ball.x and paddle.x >= 0):
        paddle.x -= paddle.speed
    if (paddle.x+(paddle.width/2) < ball.x and paddle.x+paddle.width <= screen_width):
        paddle.x += paddle.speed
    hit_paddle = False
    # Count a hit when the ball's bottom edge is within 2px of the paddle
    # top and the ball is horizontally over the paddle.
    if ((paddle.y-2 <= ball.y+ball.radius <= paddle.y+2) and (paddle.x <= ball.x <= paddle.x+paddle.width)):
        if (hit_buffer == 0):
            hit_paddle = True
            score += 1
            hit_buffer = 20  # debounce: no re-score for 20 frames
    ball.move(screen_width, screen_height, hit_paddle)
    if (hit_buffer > 0):
        hit_buffer -= 1
    if ball.dx == 0 and ball.dy == 0:
        # NOTE(review): assumes ball.move zeroes dx/dy on a miss -- confirm.
        running = False
    clock.tick(FPS)
    # Flip the display
    pygame.display.flip()
# Done! Time to quit.
pygame.quit()
| [
"[email protected]"
] | |
4f6c3c3b84254e921f2c0c5e943bbdf9507428ac | 54277288865f738e44d7be1d6b41b19c63af267e | /configs/vcop/pretraining/r3d_18_ucf101.py | eee5d0eb75ac9265d0ceb4f2ff2d8593597c7c29 | [] | no_license | scenarios/SR-SVRL | 7b41d29e16cff3020f333efc28a624d85bba4537 | 26e89ecb29355635b10a355f2f16f1b5db9c4e9b | refs/heads/master | 2023-02-26T06:16:13.314491 | 2021-01-30T16:30:57 | 2021-01-30T16:30:57 | 307,295,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | _base_ = ['./default_runtime.py']
work_dir = './output/vcop/pretraining/r3d_18_ucf101'  # checkpoints/logs go here
# VCOP (Video Clip Order Prediction) pretraining with an 18-layer R3D backbone.
model = dict(
    type='VCOP',
    backbone=dict(
        type='R3D',
        depth=18,
        num_stages=4,
        stem=dict(
            temporal_kernel_size=3,
            temporal_stride=1,
            in_channels=3,
            with_pool=False,
        ),
        # Stage 1 keeps resolution; stages 2-4 downsample.
        down_sampling=[False, True, True, True],
        channel_multiplier=1.0,
        bottleneck_multiplier=1.0,
        with_bn=True,
        pretrained=None,
    ),
    vcop_head=dict(
        in_channels=512,
        tuple_len=3,  # number of clips whose ordering is predicted
        hidden_channels=512,
        dropout_ratio=0.25
    )
)
| [
"[email protected]"
] | |
ccd6f80811a21dcdf462628ab9ef49956adb145b | b4a22431ee78300802af8c11a989baca6e6b2688 | /setup.py | af3d44f45e3f80a1fff5d242d5d48a6473198514 | [] | no_license | sfbrigade/open_ballot | 53f33c0a5b8f0b7d94854bcfb240e57d6a9fa411 | 2e8b59d8e1d3e6f8347c533650cbe372dd0c4a95 | refs/heads/master | 2020-05-01T07:58:36.625072 | 2014-05-29T02:38:59 | 2014-05-29T02:38:59 | 13,112,452 | 1 | 0 | null | 2015-11-10T22:12:06 | 2013-09-26T03:54:14 | SQL | UTF-8 | Python | false | false | 1,221 | py | import os
from setuptools import setup, find_packages
# Absolute path of the directory containing this setup.py.
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
    README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
    CHANGES = f.read()
# Runtime dependencies: Pyramid web stack + SQLAlchemy persistence.
requires = [
    'pyramid',
    'pyramid_chameleon',
    'pyramid_debugtoolbar',
    'pyramid_tm',
    'SQLAlchemy',
    'transaction',
    'zope.sqlalchemy',
    'waitress',
    ]
setup(name='open_ballot',
      version='0.0',
      description='open_ballot',
      long_description=README + '\n\n' + CHANGES,
      classifiers=[
        "Programming Language :: Python",
        "Framework :: Pyramid",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
      ],
      author='',
      author_email='',
      url='',
      keywords='web wsgi bfg pylons pyramid',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      test_suite='open_ballot',
      install_requires=requires,
      # Entry points: the WSGI app factory and the DB bootstrap script.
      entry_points="""\
      [paste.app_factory]
      main = open_ballot:main
      [console_scripts]
      initialize_open_ballot_db = open_ballot.scripts.initializedb:main
      """,
      )
| [
"[email protected]"
] | |
c64a4dd21c4229520708409ded2b44b76b9c60ca | c118c2f09b17e110387946e45b0cd6049e30c8f4 | /lab08_minimax-master/lab08_minimax-master/tests/knapsack/benchmark_small.py | 91fe35261390c2425e25034f251b932dcc5b5f1b | [] | no_license | ZuzannaMisztal/Badania-Operacyjne | 0ec280ef0a5e82b919dbee6284cc4d1c02765099 | 89b516a0c4bcaca1c7a810e37d13eb6b9907be38 | refs/heads/main | 2023-04-15T17:18:52.375676 | 2021-04-23T20:42:06 | 2021-04-23T20:42:06 | 360,997,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from tests.knapsack.knapsack_benchmark import KnapsackBenchmark
# Small knapsack instances: "ks_<n>_<variant>" where <n> is the item count.
problems = ["ks_4_0", "ks_19_0", "ks_30_0", "ks_40_0", "ks_45_0"]
benchmark = KnapsackBenchmark(problems)
benchmark.run()
"[email protected]"
] | |
0df64cbead81c2a07c3c63de293eecf1dcd41184 | 875bb84440094ce058a2ec25a661a7da6bb2e129 | /algo_py/boj/bj1212.py | 3005d1075ae35471fa8e65763598520f538478f1 | [] | no_license | shg9411/algo | 150e4291a7ba15990f17ca043ae8ab59db2bf97b | 8e19c83b1dbc0ffde60d3a3b226c4e6cbbe89a7d | refs/heads/master | 2023-06-22T00:24:08.970372 | 2021-07-20T06:07:29 | 2021-07-20T06:07:29 | 221,694,017 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | print(bin(int('0o'+input().rstrip(),8))[2:]) | [
"[email protected]"
] | |
020d76615d6bd596f75e25283ea880e7c0a87283 | 1de81bc25cb6da27822273e803a97569e77ffc59 | /perceptron.py | 56bd48cee7387ecba620efbf08db5c57b809aaac | [] | no_license | Huarong/myML | 19cb0c8db4409527cb2202271caed8ce9f8f24ec | 21f7e09af40e843aa440270463f093efea7bb0d1 | refs/heads/master | 2016-09-05T15:08:32.337766 | 2014-02-10T08:55:25 | 2014-02-10T08:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,662 | py | #coding=utf-8
import Queue as queue
class PerceptronModel(object):
"""This is an implemetion of Perceptron according "Statistic Learning Method"
written by Li Hang"""
def __init__(self, m=2):
self.m = m
self.w = [0 for i in range(m)]
self.b = 0
def set_w(self, w):
self.w = w
return None
def set_b(self, b):
self.b = b
return None
def train(self, T):
iter_count = 0
print "Iterate Count: %d, w: %s, b: %s" % (
iter_count, self.w, self.b)
while True:
all_correct_classified = True
iter_count += 1
index = 0
for index, (x, y) in enumerate(T):
if not self.correct_classified(x, y):
self.w = [wi + yxi for (wi, yxi) in zip(self.w, (y * xi for xi in x))]
self.b = self.b + y
all_correct_classified = False
break
index += 1
if all_correct_classified:
print "Iterate Count: %d, misclassification: None, w: %s, b: %s" % (
iter_count, self.w, self.b)
break
print "Iterate Count: %d, misclassification: x[%d], w: %s, b: %s" % (
iter_count, index, self.w, self.b)
return None
def correct_classified(self, x, y):
f = y * (sum(wi * xi for (wi, xi) in zip(self.w, x)) + self.b)
if f > 0:
return True
return False
def main():
p = PerceptronModel()
T = [((3, 3), 1), ((4, 3), 1), ((1, 1), -1)]
p.train(T)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a23aaeb1fe329a362d00beace17f570d5ab087b3 | 5cb3b0b88c1baa2fae9562ac4cad5f84d65221e1 | /w7/demo/demo/core/models.py | b936ac6f63740a7a568da59d6fbd5aca09523171 | [] | no_license | bobur554396/BFDjango2020Spring | aa7ad9a595b247100f876e36585368af078d862e | e7ef04be2cf4d2506c2212ea4509a106e12d4dd4 | refs/heads/master | 2020-12-15T07:07:16.214284 | 2020-04-13T19:16:28 | 2020-04-13T19:16:28 | 235,028,587 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,162 | py | from django.db import models
from rest_framework import serializers
class Publisher(models.Model):
"""Publisher class"""
MALE = 1
FEMALE = 2
GENDER = (
(MALE, 'male'),
(FEMALE, 'female'),
)
name = models.CharField(max_length=300, unique=True)
city = models.CharField(max_length=300)
gender = models.PositiveSmallIntegerField(choices=GENDER, default=MALE)
objects = models.Manager()
class Meta:
verbose_name = 'Publisher'
verbose_name_plural = 'Publishers'
# unique_together = ('name', 'city')
# ordering = ('name',)
# db_table = 'publishers_table'
def __str__(self):
return self.name
def save(self, *args, **kwargs):
pass
class Author(models.Model):
name = models.CharField(max_length=300)
email = models.CharField(max_length=300)
rating = models.IntegerField(default=0)
# creator = models.ForeignKey(MainUser)
def __str__(self):
return self.name
def set_new_rating(self, value):
self.rating = value
self.save()
# @property
def books_count(self):
pass
# return self.books.count()
# print(a.books_count)
# class PublishedBook(models.Manager):
# def get_queryset(self):
# return self.filter(is_published=True)
#
# def filter_by_name(self, name_pattern):
# return self.filter(name__contains=name_pattern)
#
#
# class NotPublishedBook(models.Manager):
# def get_queryset(self):
# return self.filter(is_published=False)
def valid_num_pages(value):
if not(10 >= value >= 5000):
raise serializers.ValidationError('invalid num of pages')
class Book(models.Model):
name = models.CharField(max_length=300)
price = models.FloatField(default=0)
num_pages = models.IntegerField(default=0,
validators=[valid_num_pages])
is_published = models.BooleanField(default=False)
author = models.ForeignKey(Author,
on_delete=models.CASCADE,
related_name='books')
publisher = models.ForeignKey(Publisher,
on_delete=models.CASCADE,
related_name='books')
objects = models.Manager()
# published_books = PublishedBook()
# not_published_books = NotPublishedBook()
@property
def price_round(self):
return round(self.price, 3)
@classmethod
def top_ten(cls):
return cls.objects.all()[:10]
@staticmethod
def cmp_books(book1, book2):
return book1.price > book2.price
# b1 = Book()
# print(b1.price_round)
#
# b2 = Book()
#
# ret = Book.cmp_books(b1, b2)
class Tag(models.Model):
name = models.CharField(max_length=200)
class BookTag(models.Model):
tag = models.ForeignKey(Tag, on_delete=models.CASCADE,
related_name='books')
book = models.ForeignKey(Book, on_delete=models.CASCADE,
related_name='tags')
# t = Tag()
# t.books.all()
#
# b = Book()
# for book_tag in b.tags.all():
# print(book_tag.tag)
#
| [
"[email protected]"
] | |
aa7195a1b211b36ccd80339edc54ae2ddedfa312 | c24e1214c18e724758d73735f431e37c283595c0 | /energy.py | 1f44018853b607107a8c34e85b2b017a5b1fdbfb | [] | no_license | laardi/SOP | 9e140535138b6e79ebbc67a126d0a33de1d1cfc8 | 5cc3dd1f99f4a07105402199b0c16c8eef13829f | refs/heads/master | 2021-01-13T11:38:19.640843 | 2017-02-07T16:49:44 | 2017-02-07T16:50:22 | 81,229,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py | import wave
from yaafelib import *
def _energy(audio_location, sample_rate):
# This function behaves the same as 'python yaafe.py -r SAMPLERATE -f \
# "energy: Energy PARAMETERS" WAV-LOCATION'
# SAMPLERATE : Samplerate of the file being processed
#- blockSize (default=1024): output frames size
#- stepSize (default=512): step between consecutive frames
# Build a dataflow object using FeaturePlan
# blockSize, stepSize could be added too. 1024, 512 default
fp = FeaturePlan(sample_rate=sample_rate)
# Using *.addFeature() multiple extractions can be called with a
# single call
fp.addFeature('energy: Energy')
#('energy: Energy blockSize=1024 stepSize=512')
# Get dataflow
df = fp.getDataFlow()
# Configure engine
engine = Engine()
engine.load(df)
# extract features from audio using AudioFileProcessor
afp = AudioFileProcessor()
afp.processFile(engine, audio_location)
# features array holds all the extracted features
features = engine.readAllOutputs()
# returns the array of features extracted
return features
def take_wav(fileloc):
audio = wave.open(fileloc)
fr = audio.getframerate()
return audio, fr
def main():
fileloc = '/tmp/wav/aanipankki_mono/kissa2.wav'
a, b = take_wav(fileloc)
results = _energy(fileloc, b)
print (results)
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
9dea8f027120df7ac7ae7eddda181e5fc4f7056f | f58bd787e996894c467b2ed7658ff5ab8c2cfb70 | /IT210/Exam2/Exam2beta.py | c0e8da74b4288896b170ea3586d1e923cc8a226e | [] | no_license | rumman07/python | 4250d56a1bec79f554880390ac7278bfe06ba98f | a4ac13fe434fd8235cbfbf7a5e34e81af0f02225 | refs/heads/master | 2023-08-17T00:55:23.254729 | 2023-08-16T07:09:36 | 2023-08-16T07:09:36 | 145,334,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | def main():
processInput()
def processInput():
#Prompt for the input and output file names
inputFileName = input("Input file: ")
outputFileName = input("Output file: ")
#Open the input and output files.
inputFile = open(inputFileName, "r")
outputFile = open(outputFileName, "w")
for line in inputFile:
parts = line.split(" ")
#Extract the data fields
firstName = [parts[0]]
lastName = [parts[1]]
techId = [parts[2]]
creditsTaken = [int(parts[3])]
qualityPoints = [int(parts[4])]
masterList = firstName + lastName + techId + creditsTaken + qualityPoints
print(masterList)
#Write the output
#outputFile.write(firstName)
#outputFile.write(lastName)
#outputFile.write(techId)
#outputFile.write(str("%-10s"%creditsTaken))
#outputFile.write(str("%-10s"%qualityPoints))
#close file
#inputFile.close()
#outputFile.close()
main()
| [
"[email protected]"
] | |
4c0b163a7460d2fd4bc039cf7ea4f217d04db9cf | 47ce68e1ff970318fd31ac43405d0e1fa3594bf6 | /Models/biGAN/lowerDimBiganXEntropy.py | c888aeec7669d02cd851651a907718f7717dec9c | [
"BSD-3-Clause"
] | permissive | Midoriii/Anomaly_Detection_Diploma | 7196da379f8aefbd4546ca23e8303d1829e059fb | 11145e3e5210a4e45a33d98b138213edb7bc5d3d | refs/heads/master | 2023-03-25T20:42:56.961210 | 2021-03-14T01:13:39 | 2021-03-14T01:13:39 | 261,205,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,537 | py | '''
Copyright (c) 2021, Štěpán Beneš
Basic bigAN net, using cross entropy as loss and made to work on 192x192
'''
import numpy as np
from Models.biGAN.BaseBiganModel import BaseBiganModel
from Models.Losses.custom_losses import wasserstein_loss
from Models.biGAN.weightclip_constraint import WeightClip
from keras.layers import Input, Reshape, Dense, Flatten, concatenate
from keras.layers import UpSampling2D, Conv2D, MaxPooling2D, BatchNormalization, Dropout, LeakyReLU
from keras.models import Model
from keras.optimizers import RMSprop, Adam, SGD
class lowerDimBiganXEntropy(BaseBiganModel):
def __init__(self, input_shape, latent_dim=24, lr=0.0005, w_clip=0.01, batch_size=4):
super().__init__(input_shape, latent_dim, lr, w_clip, batch_size)
self.name = "lowerDimBiganXEntropy"
g_optimizer = Adam(lr=self.lr, beta_1=0.5)
d_optimizer = SGD(lr=self.lr)
self.disc_labels_real = np.zeros((self.batch_size, 1))
self.genc_labels_real = np.zeros((self.batch_size, 1))
self.genc_labels_fake = np.ones((self.batch_size, 1))
self.disc_labels_fake = np.ones((self.batch_size, 1))
self.d = self.build_discriminator()
self.d.compile(optimizer=d_optimizer, loss='binary_crossentropy', metrics=['accuracy'])
self.g = self.build_generator()
self.e = self.build_encoder()
# The Discriminator part in GE model won't be trainable - GANs take turns.
# Since the Discrimiantor itself has been previously compiled, this won't affect it.
self.d.trainable = False
self.ge = self.build_ge_enc()
self.ge.compile(optimizer=g_optimizer, loss=['binary_crossentropy', 'binary_crossentropy'])
return
def build_generator(self):
z_input = Input(shape=[self.latent_dim])
x = Dense(6*6*32)(z_input)
x = Reshape([6, 6, 32])(x)
# 6 -> 12
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = UpSampling2D((2, 2))(x)
# 12 -> 24
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = UpSampling2D((2, 2))(x)
# 24 -> 48
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = UpSampling2D((2, 2))(x)
# 48 -> 96
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = UpSampling2D((2, 2))(x)
# 96 -> 192
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(1, (3, 3), activation='tanh', padding='same')(x)
return Model(inputs=z_input, outputs=x)
def build_encoder(self):
img_input = Input(shape=[self.input_shape, self.input_shape, 1])
# 192 -> 96
x = Conv2D(32, (3, 3), padding='same')(img_input)
x = LeakyReLU(0.1)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
# 96 -> 48
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
# 48 -> 24
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
# 24 -> 12
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
# 12 -> 6
x = Conv2D(32, (3, 3), padding='same')(x)
x = LeakyReLU(0.1)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Flatten()(x)
x = Dense(256)(x)
x = LeakyReLU(0.1)(x)
x = Dense(self.latent_dim)(x)
return Model(inputs=img_input, outputs=x)
def build_discriminator(self):
img_input = Input(shape=[self.input_shape, self.input_shape, 1])
z_input = Input(shape=[self.latent_dim])
# Latent
l = Dense(256)(z_input)
l = LeakyReLU(0.1)(l)
l = Dense(256)(l)
l = LeakyReLU(0.1)(l)
# Image
x = Conv2D(64, (3, 3), padding='same')(img_input)
x = BatchNormalization()(x)
x = LeakyReLU(0.1)(x)
x = Dropout(rate=self.dropout)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(64, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = LeakyReLU(0.1)(x)
x = Dropout(rate=self.dropout)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(128, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = LeakyReLU(0.1)(x)
x = Dropout(rate=self.dropout)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(128, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = LeakyReLU(0.1)(x)
x = Dropout(rate=self.dropout)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
# Joint
x = Flatten()(x)
x = concatenate([x, l])
x = Dense(256)(x)
x = LeakyReLU(0.1)(x)
x = Dense(1, activation='sigmoid')(x)
return Model(inputs=[img_input, z_input], outputs=x)
def build_ge_enc(self):
img_input = Input(shape=[self.input_shape, self.input_shape, 1])
z_input = Input(shape=[self.latent_dim])
fake_imgs = self.g(z_input)
critic_fake = self.d([fake_imgs, z_input])
fake_z = self.e(img_input)
critic_real = self.d([img_input, fake_z])
return Model(inputs=[img_input, z_input], outputs=[critic_real, critic_fake])
| [
"[email protected]"
] | |
601f6263f6c1ff4c717652ab7afb96c3d9ee243a | 214838dc887c044c7b10f26a6fd85ee231ad8107 | /vendor/github.com/google/protobuf/update_version.py | 7a961ae1397c6d597b84dfbd47b7d45b2ff0337b | [
"Apache-2.0",
"LicenseRef-scancode-protobuf"
] | permissive | coolbreeze000/ovs-gnxi | 5c486e192bad4880bff6ea38f6969417986fd262 | ad9c0bf3565a3cf8dfe5ca9929c2632a57fd279b | refs/heads/master | 2021-04-18T21:31:53.828567 | 2019-04-22T21:01:02 | 2019-04-22T21:01:02 | 126,850,919 | 2 | 1 | Apache-2.0 | 2019-04-22T21:01:03 | 2018-03-26T15:36:47 | Go | UTF-8 | Python | false | false | 7,883 | py | #!/usr/bin/env python
import datetime
import re
import sys
from xml.dom import minidom
if len(sys.argv) < 2:
print """
[ERROR] Please specify a version.
Example:
./update_version.py 2.1.3
"""
exit(1)
NEW_VERSION = sys.argv[1]
NEW_VERSION_INFO = NEW_VERSION.split('.')
if len(NEW_VERSION_INFO) != 3:
print """
[ERROR] Version must be in the format <MAJOR>.<MINOR>.<MICRO>
Example:
./update_version.py 2.1.3
"""
exit(1)
def Find(elem, tagname):
for child in elem.childNodes:
if child.nodeName == tagname:
return child
return None
def FindAndClone(elem, tagname):
return Find(elem, tagname).cloneNode(True)
def ReplaceText(elem, text):
elem.firstChild.replaceWholeText(text)
def RewriteXml(filename, rewriter, add_xml_prefix=True):
document = minidom.parse(filename)
rewriter(document)
# document.toxml() always prepend the XML version without inserting new line.
# We wants to preserve as much of the original formatting as possible, so we
# will remove the default XML version and replace it with our custom one when
# whever necessary.
content = document.toxml().replace('<?xml version="1.0" ?>', '')
file_handle = open(filename, 'wb')
if add_xml_prefix:
file_handle.write('<?xml version="1.0" encoding="UTF-8"?>\n')
file_handle.write(content)
file_handle.write('\n')
file_handle.close()
def RewriteTextFile(filename, line_rewriter):
lines = open(filename, 'r').readlines()
updated_lines = []
for line in lines:
updated_lines.append(line_rewriter(line))
if lines == updated_lines:
print '%s was not updated. Please double check.' % filename
f = open(filename, 'w')
f.write(''.join(updated_lines))
f.close()
def UpdateConfigure():
RewriteTextFile('configure.ac',
lambda line : re.sub(
r'^AC_INIT\(\[Protocol Buffers\],\[.*\],\[[email protected]\],\[protobuf\]\)$',
('AC_INIT([Protocol Buffers],[%s],[[email protected]],[protobuf])'
% NEW_VERSION),
line))
def UpdateCpp():
cpp_version = '%s00%s00%s' % (
NEW_VERSION_INFO[0], NEW_VERSION_INFO[1], NEW_VERSION_INFO[2])
def RewriteCpp(line):
line = re.sub(
r'^#define GOOGLE_PROTOBUF_VERSION .*$',
'#define GOOGLE_PROTOBUF_VERSION %s' % cpp_version,
line)
line = re.sub(
r'^#define PROTOBUF_VERSION .*$',
'#define PROTOBUF_VERSION %s' % cpp_version,
line)
if NEW_VERSION_INFO[2] == '0':
line = re.sub(
r'^#define GOOGLE_PROTOBUF_MIN_LIBRARY_VERSION .*$',
'#define GOOGLE_PROTOBUF_MIN_LIBRARY_VERSION %s' % cpp_version,
line)
line = re.sub(
r'^#define PROTOBUF_MIN_HEADER_VERSION_FOR_PROTOC .*$',
'#define PROTOBUF_MIN_HEADER_VERSION_FOR_PROTOC %s' % cpp_version,
line)
line = re.sub(
r'^#define GOOGLE_PROTOBUF_MIN_PROTOC_VERSION .*$',
'#define GOOGLE_PROTOBUF_MIN_PROTOC_VERSION %s' % cpp_version,
line)
line = re.sub(
r'^static const int kMinHeaderVersionForLibrary = .*$',
'static const int kMinHeaderVersionForLibrary = %s;' % cpp_version,
line)
line = re.sub(
r'^static const int kMinHeaderVersionForProtoc = .*$',
'static const int kMinHeaderVersionForProtoc = %s;' % cpp_version,
line)
return line
RewriteTextFile('src/google/protobuf/stubs/common.h', RewriteCpp)
RewriteTextFile('src/google/protobuf/port_def.inc', RewriteCpp)
def UpdateCsharp():
RewriteXml('csharp/src/Google.Protobuf/Google.Protobuf.csproj',
lambda document : ReplaceText(
Find(Find(document.documentElement, 'PropertyGroup'), 'VersionPrefix'),
NEW_VERSION),
add_xml_prefix=False)
RewriteXml('csharp/Google.Protobuf.Tools.nuspec',
lambda document : ReplaceText(
Find(Find(document.documentElement, 'metadata'), 'version'),
NEW_VERSION))
def UpdateJava():
RewriteXml('java/pom.xml',
lambda document : ReplaceText(
Find(document.documentElement, 'version'), NEW_VERSION))
RewriteXml('java/bom/pom.xml',
lambda document : ReplaceText(
Find(document.documentElement, 'version'), NEW_VERSION))
RewriteXml('java/core/pom.xml',
lambda document : ReplaceText(
Find(Find(document.documentElement, 'parent'), 'version'),
NEW_VERSION))
RewriteXml('java/util/pom.xml',
lambda document : ReplaceText(
Find(Find(document.documentElement, 'parent'), 'version'),
NEW_VERSION))
RewriteXml('protoc-artifacts/pom.xml',
lambda document : ReplaceText(
Find(document.documentElement, 'version'), NEW_VERSION))
def UpdateJavaScript():
RewriteTextFile('js/package.json',
lambda line : re.sub(
r'^ "version": ".*",$',
' "version": "%s",' % NEW_VERSION,
line))
def UpdateMakefile():
protobuf_version_offset = 11
expected_major_version = '3'
if NEW_VERSION_INFO[0] != expected_major_version:
print """[ERROR] Major protobuf version has changed. Please update
update_version.py to readjust the protobuf_version_offset and
expected_major_version such that the PROTOBUF_VERSION in src/Makefile.am is
always increasing.
"""
exit(1)
protobuf_version_info = '%s:%s:0' % (
int(NEW_VERSION_INFO[1]) + protobuf_version_offset, NEW_VERSION_INFO[2])
RewriteTextFile('src/Makefile.am',
lambda line : re.sub(
r'^PROTOBUF_VERSION = .*$',
'PROTOBUF_VERSION = %s' % protobuf_version_info,
line))
def UpdateObjectiveC():
RewriteTextFile('Protobuf.podspec',
lambda line : re.sub(
r"^ s.version = '.*'$",
" s.version = '%s'" % NEW_VERSION,
line))
def UpdatePhp():
def Callback(document):
def CreateNode(tagname, indent, children):
elem = document.createElement(tagname)
indent += 1
for child in children:
elem.appendChild(document.createTextNode('\n' + (' ' * indent)))
elem.appendChild(child)
indent -= 1
elem.appendChild(document.createTextNode('\n' + (' ' * indent)))
return elem
root = document.documentElement
version = Find(root, 'version')
ReplaceText(Find(version, 'release'), NEW_VERSION)
ReplaceText(Find(version, 'api'), NEW_VERSION)
now = datetime.datetime.now()
ReplaceText(Find(root, 'date'), now.strftime('%Y-%m-%d'))
ReplaceText(Find(root, 'time'), now.strftime('%H:%M:%S'))
changelog = Find(root, 'changelog')
for old_version in changelog.getElementsByTagName('version'):
if Find(old_version, 'release').firstChild.nodeValue == NEW_VERSION:
print ('[WARNING] Version %s already exists in the change log.'
% NEW_VERSION)
return
changelog.appendChild(document.createTextNode(' '))
stability = Find(root, 'stability')
release = CreateNode('release', 2, [
CreateNode('version', 3, [
FindAndClone(version, 'release'),
FindAndClone(version, 'api')
]),
CreateNode('stability', 3, [
FindAndClone(stability, 'release'),
FindAndClone(stability, 'api')
]),
FindAndClone(root, 'date'),
FindAndClone(root, 'time'),
FindAndClone(root, 'license'),
FindAndClone(root, 'notes')
])
changelog.appendChild(release)
changelog.appendChild(document.createTextNode('\n '))
RewriteXml('php/ext/google/protobuf/package.xml', Callback)
def UpdatePython():
RewriteTextFile('python/google/protobuf/__init__.py',
lambda line : re.sub(
r"^__version__ = '.*'$",
"__version__ = '%s'" % NEW_VERSION,
line))
def UpdateRuby():
RewriteTextFile('ruby/google-protobuf.gemspec',
lambda line : re.sub(
r'^ s.version = ".*"$',
' s.version = "%s"' % NEW_VERSION,
line))
UpdateConfigure()
UpdateCsharp()
UpdateCpp()
UpdateJava()
UpdateJavaScript()
UpdateMakefile()
UpdateObjectiveC()
UpdatePhp()
UpdatePython()
UpdateRuby()
| [
"[email protected]"
] | |
b3171acf6e6391d74104fb29632f79fe9666a420 | 41b1db1c40fd7a17633117ea6c35641611ad74e2 | /main.py | 3f767ecac3018a83c43218fbeb4cad3f7d40eb1c | [] | no_license | samtsevich/VCNEB_input_generator | 0969a6b345308d74a4f5973ea43209c61e34e6ff | 5194a018cb15fc487b70171744c30c3d0990ce57 | refs/heads/master | 2021-01-21T16:48:33.741609 | 2017-05-20T18:41:33 | 2017-05-20T18:41:33 | 91,909,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,703 | py | '''
Code for creating linear interpolation between 2 structures.
IMPORTANT:
initial and final structures should be POSCAR-files of VASP5
'''
from __future__ import division
__author__ = 'asamtsevich'
from ase.atoms import Atoms
from ase.io.vasp import read_vasp, write_vasp
import argparse
import os
import numpy as np
import sys
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='VCNEB_tool')
# parser.add_argument("-v", "--version", dest="version", action="store_true",
# help="show program's version number and exit")
parser.add_argument('-i', dest='initStructure', type=str,
help='path to initial POSCAR file')
parser.add_argument('-f', dest='finalStructure', type=str,
help='path to final POSCAR file')
parser.add_argument('-o', dest='output', type=str,
help='path to output file')
parser.add_argument('-N', dest='N', type=int,
help='num of images that will be in output file')
# parser.add_argument("-p", "--parameter", action='callback', dest="parm",
# help="specify parameter to get help. If no value or 'all' value is specified, all INPUT.txt parameters will be shown",
# metavar="PARM")
args = parser.parse_args()
if not args.initStructure:
print('Please provide initStructure path')
parser.print_help()
if not args.finalStructure:
print('Please provide finalStructure path')
parser.print_help()
if not args.output:
print('Please provide output path')
parser.print_help()
if args.N <= 2:
print('Please provide number of images > 2')
parser.print_help()
initStructure = read_vasp(args.initStructure)
finalStructure = read_vasp(args.finalStructure)
symbols = initStructure.get_chemical_symbols()
initCoord = initStructure.get_scaled_positions()
initCell = initStructure.get_cell()
assert len(initStructure) == len(finalStructure)
N_system = len(initStructure)
N = args.N - 1
diffCoord = finalStructure.get_scaled_positions() - initCoord
for i in range(N_system):
for j in range(0,3):
if diffCoord[i, j] > 0.5:
initCoord[i,j] += 1.0
elif diffCoord[i, j] < -0.5:
initCoord[i, j] -= 1.0
diffCoord = finalStructure.get_scaled_positions() - initCoord
assert (np.abs(diffCoord) < 0.5).all()
# for atom in diffCoord:
# for x in atom:
# if x > 0.5:
# x -= 1.0
# elif x < -0.5:
# x += 1.0
# stepCoord = diffCoord / N
stepCoord = diffCoord / N
stepCell = (finalStructure.get_cell() - initCell) / N
TMP_STRUCTURE_PATH = 'tmp.POSCAR'
with open(args.output, 'w') as fp:
write_vasp(TMP_STRUCTURE_PATH, initStructure, label='Image_ini ', direct=True, vasp5=True)
with open(TMP_STRUCTURE_PATH, 'r') as f:
fp.write(f.read())
for i in range(N-1):
current = Atoms(symbols=symbols, scaled_positions=initCoord + (i+1)*stepCoord, cell=initCell + (i+1)*stepCell)
write_vasp(TMP_STRUCTURE_PATH, current, label='Image ' + str(i+2), direct=True, vasp5=True)
with open(TMP_STRUCTURE_PATH, 'r') as f:
fp.write(f.read())
write_vasp(TMP_STRUCTURE_PATH, finalStructure, label='Image_end ', direct=True, vasp5=True)
with open(TMP_STRUCTURE_PATH, 'r') as f:
fp.write(f.read())
# cleaning unnecessary files
os.remove(TMP_STRUCTURE_PATH)
print('Building complete.')
| [
"[email protected]"
] | |
33d36ffcb8449ec8a1abc25e2289481273002db4 | 13f198b5837fe031ca972994a40e29579710708d | /eshop/cart/migrations/0003_delete_userorders.py | a563b7bd23692aa5a8f9795340557e1e2bc2624d | [] | no_license | jenish2/ElectronicShop | 44e60e3f93b8b8558dd045a255e95bd8764ed9e4 | c2b31c30b21383e103bced011c59c056d3220b07 | refs/heads/master | 2023-05-31T23:06:16.573868 | 2021-06-21T05:24:10 | 2021-06-21T05:24:10 | 267,392,752 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | # Generated by Django 3.0.4 on 2020-04-24 12:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cart', '0002_userorders'),
]
operations = [
migrations.DeleteModel(
name='UserOrders',
),
]
| [
"[email protected]"
] | |
39ba074dc4ed2dfc458d0f96d05397e202baecd3 | 5c2ed67bdaaa459f9332479bb6e499434bfee5db | /levels/pythonchallange_000/pythonchallange_000.py | db45bd906adbfb6fd1f2bf2977da6f2fd1d546fe | [] | no_license | reedcourty/pythonchallenge | 2b050aed20de973f88118dfada7dd39a38b4fc20 | 766b284831cee01d7e1e7f2b4251d9152c851077 | refs/heads/master | 2021-03-12T23:16:50.878070 | 2015-07-19T16:28:48 | 2015-07-19T16:33:41 | 2,092,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | # http://www.pythonchallenge.com/pc/def/0.html
# Hint: try to change the URL address.
# On http://www.pythonchallenge.com/pc/def/1.html
# 2**38 is much much larger.
import webbrowser
webbrowser.open('http://www.pythonchallenge.com/pc/def/' + str(2**38) + '.html')
| [
"[email protected]"
] | |
7465cad3644655de43d7349acad63a4c4b35575d | 4ceba3652489088d8f9a264a654d2e6385fcc53e | /rbb_server/test/rbb_server_test/test_users.py | cf4d5928bd445f4e76cc14563a61fe23e6bfc31e | [
"MIT"
] | permissive | AMZ-Driverless/rbb_core | 344ce8a60dd1c91593e0b5f38751164bec81906d | 992380baf06271516ef4d2944e12ea941560ffce | refs/heads/master | 2022-05-10T19:15:03.328439 | 2022-04-03T16:57:54 | 2022-04-03T16:57:54 | 174,819,553 | 62 | 16 | MIT | 2022-04-21T09:30:31 | 2019-03-10T12:20:17 | Python | UTF-8 | Python | false | false | 6,358 | py | # AMZ-Driverless
# Copyright (c) 2019 Authors:
# - Huub Hendrikx <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from rbb_client.api_client import ApiException
from rbb_client.models import User
from rbb_server_test import ClientServerBaseTestCase
class TestUsers(ClientServerBaseTestCase):
def test_get_me(self):
api = self.get_admin_api()
result = api.get_current_user() # type: User
self.assertEqual(result.alias, 'admin')
def test_get_unknown_user(self):
api = self.get_admin_api()
try:
returned_user = api.get_user_account("this-user-for-sure-doesnt-exist")
self.fail("Exception should be thrown")
except ApiException as e:
self.assertEquals(e.status, 404)
def test_get_user_no_permission(self):
api = self.get_user_api()
try:
returned_user = api.get_user_account("admin")
self.fail("Exception should be thrown")
except ApiException as e:
self.assertEquals(e.status, 403)
def test_add_new_user(self):
api = self.get_admin_api()
user = User()
user.alias = "test_user"
user.email = "[email protected]"
user.full_name = "Test User"
user.password = "testtest"
returned_user = api.put_user_account(user.alias, user)
self.assertEqual(user.alias, returned_user.alias)
self.assertEqual(user.email, returned_user.email)
self.assertEqual(user.full_name, returned_user.full_name)
self.assertIsNone(returned_user.password)
# Login as created user
api = self.get_api("test_user", "testtest")
fetched_user = api.get_current_user()
# Check that data is the same
self.assertEqual(user.alias, fetched_user.alias)
self.assertEqual(user.email, fetched_user.email)
self.assertEqual(user.full_name, fetched_user.full_name)
self.assertIsNone(fetched_user.password)
def test_change_user_permissions(self):
api = self.get_admin_api()
user = User()
user.alias = "test_user_permissions"
user.email = "[email protected]"
user.full_name = "Test User"
user.password = "testtest"
returned_user = api.put_user_account(user.alias, user)
self.assertGreaterEqual(len(returned_user.permissions), 4)
perm_dict = {}
for perm in returned_user.permissions:
perm_dict[perm.identifier] = perm
self.assertFalse(perm.granted)
perm_dict['admin'].granted = False
perm_dict['queue_result_access'].granted = True
api.put_user_account(returned_user.alias, returned_user)
returned_user = api.get_user_account(user.alias)
perm_dict = {}
for perm in returned_user.permissions:
perm_dict[perm.identifier] = perm
self.assertFalse(perm_dict['admin'].granted)
self.assertTrue(perm_dict['queue_result_access'].granted)
self.assertFalse(perm_dict['store_secret_access'].granted)
def test_add_new_user_no_permission(self):
api = self.get_user_api()
user = User()
user.alias = "test_user"
user.email = "[email protected]"
user.full_name = "Test User"
user.password = "testtest"
try:
returned_user = api.put_user_account(user.alias, user)
self.fail("Exception should be thrown")
except ApiException as e:
self.assertEquals(e.status, 403)
def test_change_password(self):
api = self.get_admin_api()
user = User()
user.alias = "test_user_password"
user.email = "[email protected]"
user.full_name = "Test User"
user.password = "testtest"
api.put_user_account(user.alias, user)
user_data = api.get_user_account(user.alias)
user_data.password = "changed_password"
api.put_user_account(user.alias, user_data)
# Login as created user
api = self.get_api("test_user_password", "changed_password")
fetched_user = api.get_current_user()
self.assertEquals(fetched_user.alias, user.alias)
def test_delete_user(self):
api = self.get_admin_api()
user = User()
user.alias = "to_delete"
user.email = "[email protected]"
user.full_name = "Test User"
user.password = "testtest"
# Add user
returned_user = api.put_user_account(user.alias, user)
# Check it's there
fetched_user = api.get_user_account(user.alias)
self.assertEqual(user.alias, fetched_user.alias)
# Delete
api.delete_user_account(user.alias)
# Check it's gone
try:
returned_user = api.get_user_account(user.alias)
self.fail("User not found exception should be thrown")
except ApiException as e:
self.assertEquals(e.status, 404)
def test_list_users(self):
api = self.get_admin_api()
accounts = api.list_user_accounts()
self.assertGreater(len(accounts), 0)
account_dict = {}
for account in accounts:
account_dict[account.alias] = account
self.assertEquals(account_dict['admin'].full_name, 'Admin')
self.assertEquals(account_dict['user'].full_name, 'User')
| [
"[email protected]"
] | |
8373a4365d7dcd05ee9388f921ba2b4d4ba9dbba | 91ac9ffb1bc34a0651d7f4b47a8363c9a9016ac3 | /python/Python27/template.py | a01cdcd004591378e829edc79f3f873e864a5354 | [] | no_license | suto3/git-public | 966b5c4967d2f192f23dc3b00cd42ac612725039 | 3c6d19cd4093247b7b459598960047e08b88392c | refs/heads/master | 2021-07-16T05:47:45.523546 | 2020-08-25T04:39:33 | 2020-08-25T04:39:33 | 9,160,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Script template.  (Original Japanese comment: "テンプレートです" = "this is a template".)
#
# NOTE(review): `sys` is imported but unused in this template.
import sys


def main():
    '''
    Main entry point (original: "メイン関数" = "main function").
    Placeholder: the body is intentionally just this docstring.
    '''


if __name__ == '__main__':
    main()
#EOF
| [
"[email protected]"
] | |
cf6c0315c0e93791c5be5b0afc67a66c7de1c22f | f7bdc48d92862923e9e562aca0457197e5975cfb | /Src/ImageProcessing/Watershed/watershed.py | 456136a58de66deaa167fd006bbd8c8fdbeaae8b | [] | no_license | jackwiy/OpenCV-Python-Tutorials | 532556bcf87b66db1e6087c8d04d4ef790f7b7f1 | 01013e7f851effc97ec6932f689aef1ee81f4140 | refs/heads/master | 2022-11-29T03:59:36.410037 | 2020-08-13T18:02:43 | 2020-08-13T18:02:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,152 | py | #!/usr/bin/env python
'''
Watershed segmentation
=========
This program demonstrates the watershed segmentation algorithm
in OpenCV: watershed().
Usage
-----
watershed.py [image filename]
Keys
----
1-7 - switch marker color
SPACE - update segmentation
r - reset
a - toggle autoupdate
ESC - exit
'''
import numpy as np
import cv2
from Src.ToolBox.common import Sketcher
class App:
    """Interactive watershed segmentation (OpenCV HighGUI, Python 2 code).

    The user paints marker seeds via the Sketcher helper; keys 1-7 select
    the marker color, and cv2.watershed() grows regions from those seeds.
    """

    def __init__(self, fn):
        # fn: path of the image file to segment.
        self.img = cv2.imread(fn)
        h, w = self.img.shape[:2]
        # Seed image: 0 = unassigned, 1..7 = user-painted marker labels.
        self.markers = np.zeros((h, w), np.int32)
        self.markers_vis = self.img.copy()
        self.cur_marker = 1
        # 8 colors: every 0/255 combination over the three channels.
        self.colors = np.int32( list(np.ndindex(2, 2, 2)) ) * 255
        self.auto_update = True
        self.sketch = Sketcher('img', [self.markers_vis, self.markers], self.get_colors)

    def get_colors(self):
        # Current paint color and its marker id, as consumed by Sketcher.
        return map(int, self.colors[self.cur_marker]), self.cur_marker

    def watershed(self):
        # Work on a copy: cv2.watershed writes its result into the marker
        # argument in place, and we must keep the user's seeds intact.
        m = self.markers.copy()
        cv2.watershed(self.img, m)
        # Clamp negatives (watershed's boundary labels) to 0 before lookup.
        overlay = self.colors[np.maximum(m, 0)]
        cv2.imshow('over', overlay)
        vis = cv2.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv2.CV_8UC3)
        cv2.imshow('watershed', vis)

    def run(self):
        # Main HighGUI event loop (Python 2 print statements).
        while True:
            ch = 0xFF & cv2.waitKey(50)
            if ch == 27:
                # ESC quits.
                break
            if ch >= ord('1') and ch <= ord('7'):
                self.cur_marker = ch - ord('0')
                print 'marker: ', self.cur_marker
            if ch == ord(' ') or (self.sketch.dirty and self.auto_update):
                self.watershed()
                self.sketch.dirty = False
            if ch in [ord('a'), ord('A')]:
                self.auto_update = not self.auto_update
                print 'auto_update if', ['off', 'on'][self.auto_update]
            if ch in [ord('r'), ord('R')]:
                # Reset all seeds and redraw the clean image.
                self.markers[:] = 0
                self.markers_vis[:] = self.img
                self.sketch.show()
        cv2.destroyAllWindows()
if __name__ == '__main__':
import sys
try: fn = sys.argv[1]
except: fn = '../../../Datas/fruits.jpg'
print __doc__
App(fn).run()
| [
"[email protected]"
] | |
371dd453508473d1f3f11653520e8f89490ddb56 | 5bd18933a2b88d59916b17c52056e9e70ba44d52 | /Assignment 2/Code/Francois/3. Crimes per District per Year.py | 3056bde0da6875f011839c1a3e8e0da58daf768d | [] | no_license | u14006512/MIT-805-Assignments | c78465345f181c3da6a77aa1b9c05de41113bf89 | 9be0e1b4813ff2ab83bfd9c46b68617161ce1ea8 | refs/heads/master | 2020-03-27T08:39:17.813027 | 2018-10-18T14:36:58 | 2018-10-18T14:36:58 | 146,273,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | import Shared as shared
# Load the crime records and report incident counts per (District, Year).
data = shared.getData(25000)
# size() counts rows per group; to_dict keys are (District, Year) tuples.
counts = data.groupby(['District', 'Year']).size().to_dict()
print(counts)
"[email protected]"
] | |
3c438f0750ba79289ddaa732dc702e72113b7ea5 | e8bef0f6dc2bd90f25e4bd2f32668ac31f2f0fe2 | /variable_neutral_line_manipulator/gui/widgets/result_text_widget.py | bece05c5efcff8c895d0bd9ccc9be9100c3aa8b8 | [] | no_license | MINGXUANCALVIN/variableNeutralLineManipulator | cdf3db4314029d84e3831ecb02570b0171615273 | c2356a49c9efd62d4ec6aa2bd83bcce26ede3707 | refs/heads/master | 2023-04-27T00:39:42.232858 | 2020-05-30T05:24:28 | 2020-05-30T05:24:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | import math
from ..gui_common import *
from ..backend import *
class ResultTextWidget(QWidget):
    """Panel that renders textual computation results.

    Subscribes to StateManagement().text_result_stream, so every string
    published on that stream replaces the displayed text.
    """

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.mainLayout = QVBoxLayout()
        # Placeholder shown until the first result arrives on the stream.
        self.text = QTextEdit("Result:\n Not computed yet")
        self.mainLayout.addWidget(self.text)
        self.setLayout(self.mainLayout)
        # Re-render whenever a new result string is published.
        StateManagement().text_result_stream.subscribe(self._showResult)

    def _showResult(self, s):
        # Stream callback: replace the widget's content with the new result.
        self.text.setText(s)
"[email protected]"
] | |
7a30bb5c72f7087e459db195116e66df3944d94c | eebf29bbf0b78aca02b9ed4c403fa56c9249fef8 | /cloto/__init__.py | ab118ddd3ccd94692eb633f414bf6f664e58383c | [] | no_license | jesuspg/fiware-cloto | a19798498d9d7273f8836bc277128fad66b9dc1d | 7d55ed33c1d67c225568af2bb2aa9041d90ff8f9 | refs/heads/master | 2021-01-18T01:24:20.934195 | 2013-11-12T15:14:45 | 2013-11-12T15:14:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | s = 'Hello, world.'
# A bare `str(s)` call whose result was discarded stood here; it was a
# no-op and has been removed.
print(s)
| [
"[email protected]"
] | |
0829c068eca4809b37b25c8aaf5f7869cb1183a4 | 91639fea573828d08e8642a9022fe2ec62319414 | /future/backports/email/quoprimime.py | 6ede1ca4aa682af2b6276361d9dd94f8dbf4288a | [
"MIT"
] | permissive | agincel/AdamTestBot | 9787a22f25a3bfc2bbab0b6c6e66b857cb369f32 | fee093c3dd944881bd92c9180fbb3a13700673da | refs/heads/master | 2020-05-22T04:26:39.241479 | 2016-12-29T22:15:04 | 2016-12-29T22:15:04 | 44,931,116 | 0 | 8 | null | 2016-10-18T22:04:33 | 2015-10-25T21:40:35 | Python | UTF-8 | Python | false | false | 11,249 | py | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield
# Contact: [email protected]
"""Quoted-printable content transfer encoding per RFCs 2045-2047.
This module handles the content transfer encoding method defined in RFC 2045
to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to
safely encode text that is in a character set similar to the 7-bit US ASCII
character set, but that includes some 8-bit characters that are normally not
allowed in email bodies or headers.
Quoted-printable is very space-inefficient for encoding binary files; use the
email.base64mime module for that instead.
This module provides an interface to encode and decode both headers and bodies
with quoted-printable encoding.
RFC 2045 defines a method for including character set information in an
`encoded-word' in a header. This method is commonly used for 8-bit real names
in To:/From:/Cc: etc. fields, as well as Subject: lines.
This module does not do the line wrapping or end-of-line character
conversion necessary for proper internationalized headers; it only
does dumb encoding and decoding. To deal with the various line
wrapping issues, use the email.header module.
"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future.builtins import bytes, chr, dict, int, range, super
__all__ = [
'body_decode',
'body_encode',
'body_length',
'decode',
'decodestring',
'header_decode',
'header_encode',
'header_length',
'quote',
'unquote',
]
import re
import io
from string import ascii_letters, digits, hexdigits
# Line-ending / joining constants used throughout this module.
CRLF = '\r\n'
NL = '\n'
EMPTYSTRING = ''

# Build a mapping of octets to the expansion of that octet.  Since we're only
# going to have 256 of these things, this isn't terribly inefficient
# space-wise.  Remember that headers and bodies have different sets of safe
# characters.  Initialize both maps with the full expansion, and then override
# the safe bytes with the more compact form.
_QUOPRI_HEADER_MAP = dict((c, '=%02X' % c) for c in range(256))
_QUOPRI_BODY_MAP = _QUOPRI_HEADER_MAP.copy()

# Safe header bytes which need no encoding.
for c in bytes(b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii')):
    _QUOPRI_HEADER_MAP[c] = chr(c)
# Headers have one other special encoding; spaces become underscores.
_QUOPRI_HEADER_MAP[ord(' ')] = '_'

# Safe body bytes which need no encoding.
for c in bytes(b' !"#$%&\'()*+,-./0123456789:;<>'
               b'?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`'
               b'abcdefghijklmnopqrstuvwxyz{|}~\t'):
    _QUOPRI_BODY_MAP[c] = chr(c)
# Helpers
def header_check(octet):
    """Return True if the octet should be escaped with header quopri."""
    # Safe octets map to themselves; anything else needs an =XX escape.
    return _QUOPRI_HEADER_MAP[octet] != chr(octet)
def body_check(octet):
    """Return True if the octet should be escaped with body quopri."""
    # Safe octets map to themselves; anything else needs an =XX escape.
    return _QUOPRI_BODY_MAP[octet] != chr(octet)
def header_length(bytearray):
    """Return a header quoted-printable encoding length.

    Note that this does not include any RFC 2047 chrome added by
    `header_encode()`.

    :param bytearray: An array of bytes (a.k.a. octets).
    :return: The length in bytes of the byte array when it is encoded with
        quoted-printable for headers.
    """
    total = 0
    for octet in bytearray:
        total += len(_QUOPRI_HEADER_MAP[octet])
    return total
def body_length(bytearray):
    """Return a body quoted-printable encoding length.

    :param bytearray: An array of bytes (a.k.a. octets).
    :return: The length in bytes of the byte array when it is encoded with
        quoted-printable for bodies.
    """
    total = 0
    for octet in bytearray:
        total += len(_QUOPRI_BODY_MAP[octet])
    return total
def _max_append(L, s, maxlen, extra=''):
if not isinstance(s, str):
s = chr(s)
if not L:
L.append(s.lstrip())
elif len(L[-1]) + len(s) <= maxlen:
L[-1] += extra + s
else:
L.append(s.lstrip())
def unquote(s):
    """Turn a string in the form =AB to the ASCII character with value 0xab"""
    hexpair = s[1:3]
    return chr(int(hexpair, 16))
def quote(c):
    """Return the quoted-printable escape (=XX) for a single character."""
    return '={0:02X}'.format(ord(c))
def header_encode(header_bytes, charset='iso-8859-1'):
    """Encode a single header line with quoted-printable (like) encoding.

    Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
    used specifically for email header fields to allow charsets with mostly 7
    bit characters (and some 8 bit) to remain more or less readable in non-RFC
    2045 aware mail clients.

    charset names the character set to use in the RFC 2046 header.  It
    defaults to iso-8859-1.
    """
    # Return empty headers as an empty string.
    if not header_bytes:
        return ''
    # Expand every octet through the header table, then wrap the result in
    # the RFC 2047 encoded-word chrome.
    encoded = EMPTYSTRING.join(_QUOPRI_HEADER_MAP[octet] for octet in header_bytes)
    return '=?%s?q?%s?=' % (charset, encoded)
class _body_accumulator(io.StringIO):
    """StringIO that tracks remaining room on the current output line.

    `room` counts how many characters still fit before `maxlinelen`;
    `newline()` resets it and `write_soft_break()` emits the '=' soft
    line break of RFC 2045 quoted-printable.
    """

    def __init__(self, maxlinelen, eol, *args, **kw):
        super().__init__(*args, **kw)
        self.eol = eol
        self.maxlinelen = self.room = maxlinelen

    def write_str(self, s):
        """Add string s to the accumulated body."""
        self.write(s)
        self.room -= len(s)

    def newline(self):
        """Write eol, then start new line."""
        self.write_str(self.eol)
        self.room = self.maxlinelen

    def write_soft_break(self):
        """Write a soft break, then start a new line."""
        self.write_str('=')
        self.newline()

    def write_wrapped(self, s, extra_room=0):
        """Add a soft line break if needed, then write s."""
        if self.room < len(s) + extra_room:
            self.write_soft_break()
        self.write_str(s)

    def write_char(self, c, is_last_char):
        # c may be a literal character or an already-quoted "=XX" triple.
        if not is_last_char:
            # Another character follows on this line, so we must leave
            # extra room, either for it or a soft break, and whitespace
            # need not be quoted.
            self.write_wrapped(c, extra_room=1)
        elif c not in ' \t':
            # For this and remaining cases, no more characters follow,
            # so there is no need to reserve extra room (since a hard
            # break will immediately follow).
            self.write_wrapped(c)
        elif self.room >= 3:
            # It's a whitespace character at end-of-line, and we have room
            # for the three-character quoted encoding.
            self.write(quote(c))
        elif self.room == 2:
            # There's room for the whitespace character and a soft break.
            self.write(c)
            self.write_soft_break()
        else:
            # There's room only for a soft break.  The quoted whitespace
            # will be the only content on the subsequent line.
            self.write_soft_break()
            self.write(quote(c))
def body_encode(body, maxlinelen=76, eol=NL):
    """Encode with quoted-printable, wrapping at maxlinelen characters.

    Each line of encoded text will end with eol, which defaults to "\\n".  Set
    this to "\\r\\n" if you will be using the result of this function directly
    in an email.

    Each line will be wrapped at, at most, maxlinelen characters before the
    eol string (maxlinelen defaults to 76 characters, the maximum value
    permitted by RFC 2045).  Long lines will have the 'soft line break'
    quoted-printable character "=" appended to them, so the decoded text will
    be identical to the original text.

    The minimum maxlinelen is 4 to have room for a quoted character ("=XX")
    followed by a soft line break.  Smaller values will generate a
    ValueError.
    """
    if maxlinelen < 4:
        raise ValueError("maxlinelen must be at least 4")
    if not body:
        return body

    # The last line may or may not end in eol, but all other lines do.
    last_has_eol = (body[-1] in '\r\n')

    # This accumulator will make it easier to build the encoded body.
    encoded_body = _body_accumulator(maxlinelen, eol)

    lines = body.splitlines()
    last_line_no = len(lines) - 1
    for line_no, line in enumerate(lines):
        last_char_index = len(line) - 1
        for i, c in enumerate(line):
            # Quote unsafe octets; safe ones pass through unchanged.
            if body_check(ord(c)):
                c = quote(c)
            # The accumulator handles wrapping and end-of-line whitespace.
            encoded_body.write_char(c, i==last_char_index)
        # Add an eol if input line had eol.  All input lines have eol except
        # possibly the last one.
        if line_no < last_line_no or last_has_eol:
            encoded_body.newline()

    return encoded_body.getvalue()
# BAW: I'm not sure if the intent was for the signature of this function to be
# the same as base64MIME.decode() or not...
def decode(encoded, eol=NL):
    """Decode a quoted-printable string.

    Lines are separated with eol, which defaults to \\n.
    """
    if not encoded:
        return encoded
    # BAW: see comment in encode() above.  Again, we're building up the
    # decoded string with string concatenation, which could be done much more
    # efficiently.
    decoded = ''
    for line in encoded.splitlines():
        line = line.rstrip()
        if not line:
            # A blank input line decodes to a bare line separator.
            decoded += eol
            continue

        i = 0
        n = len(line)
        while i < n:
            c = line[i]
            if c != '=':
                # Ordinary character: pass through literally.
                decoded += c
                i += 1
            # Otherwise, c == "=".  Are we at the end of the line?  If so, add
            # a soft line break.
            elif i+1 == n:
                # Soft line break: swallow the '=' and skip the end-of-line
                # check below (the `continue`), so no eol is emitted.
                i += 1
                continue
            # Decode if in form =AB
            elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
                decoded += unquote(line[i:i+3])
                i += 3
            # Otherwise, not in form =AB, pass literally
            else:
                decoded += c
                i += 1

            # Reached the end of a (hard-broken) line: emit the separator.
            if i == n:
                decoded += eol
    # Special case if original string did not end with eol
    if encoded[-1] not in '\r\n' and decoded.endswith(eol):
        decoded = decoded[:-1]
    return decoded
# For convenience and backwards compatibility w/ standard base64 module
# (email.base64mime exposes the same pair of alias names).
body_decode = decode
decodestring = decode
def _unquote_match(match):
    """Turn a match in the form =AB to the ASCII character with value 0xab"""
    return unquote(match.group(0))
# Header decoding is done a bit differently
def header_decode(s):
    """Decode a string encoded with RFC 2045 MIME header `Q' encoding.

    This function does not parse a full MIME header value encoded with
    quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
    the high level email.header class for that functionality.
    """
    s = s.replace('_', ' ')
    # Bug fix: re.ASCII was previously passed as the *positional* 4th
    # argument of re.sub(), which is `count` -- so only the first 256
    # (re.ASCII's numeric value) escapes were ever decoded.  It must be
    # passed via the `flags` keyword.
    return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s, flags=re.ASCII)
| [
"[email protected]"
] | |
a0b703f87a403de60bd497d01dc11969567edd6c | 2eb8e3606a8df45d432fdf56ee9aa24942304526 | /rocketgram/api/shipping_option.py | 55db89dd5aca13265cd5374ea45adc78d30833c9 | [
"MIT"
] | permissive | KulZlaK/rocketgram | 22848293980ba44dd9fb63db28f34be36c437c84 | 09587deecffcd7ccc9529f4d9e51221888870f23 | refs/heads/master | 2022-07-27T23:25:51.254444 | 2020-05-15T21:36:57 | 2020-05-15T21:36:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # Copyright (C) 2015-2020 by Vd.
# This file is part of Rocketgram, the modern Telegram bot framework.
# Rocketgram is released under the MIT License (see LICENSE).
from dataclasses import dataclass
from typing import List
from .labeled_price import LabeledPrice
@dataclass(frozen=True)
class ShippingOption:
    """\
    Represents ShippingOption object:
    https://core.telegram.org/bots/api#shippingoption
    """

    # Shipping option identifier (per the Bot API reference linked above).
    id: str
    # Option title shown to the user.
    title: str
    # Price portions that make up the option's total price.
    prices: List[LabeledPrice]
| [
"vd@"
] | vd@ |
4fcaed0256103e3eb8ace9827d79a215ae909c24 | 3dfb4ee39555b30e6e0c6fcdbef371864e69f694 | /google-cloud-sdk/.install/.backup/lib/surface/preview/app/__init__.py | 8250cdb4a7ea1553778dd1cecb732f0c19282aa6 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | MD-Anderson-Bioinformatics/NG-CHM_Galaxy | 41d1566d5e60416e13e023182ca4351304381a51 | dcf4886d4ec06b13282143ef795c5f0ff20ffee3 | refs/heads/master | 2021-06-02T21:04:12.194964 | 2021-04-29T14:45:32 | 2021-04-29T14:45:32 | 130,249,632 | 0 | 1 | null | 2020-07-24T18:35:21 | 2018-04-19T17:25:33 | Python | UTF-8 | Python | false | false | 2,988 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud app group."""
import sys
from googlecloudsdk.calliope import base
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import platforms
class UnsupportedPythonVersionError(exceptions.Error):
    """Raised when the running Python interpreter is too old (see Appengine.Filter)."""
    pass
# TODO(b/24169312): remove
CHANGE_WARNING = """\
The `gcloud preview app` surface is rapidly improving. Look out for
changing flags and new commands before the transition out of the `preview`
component. These changes will be documented in the Cloud SDK release notes
<https://goo.gl/X8apDJ> and via deprecation notices for changing commands.
If you would like to avoid changing behavior, please pin to a fixed version of
the Google Cloud SDK as described under the "Alternative Methods" section of the
Cloud SDK web site: <https://cloud.google.com/sdk/#alternative>.
"""
@base.Beta
class Appengine(base.Group):
"""Manage your App Engine app.
This set of commands allows you to deploy your app, manage your existing
deployments, and also run your app locally. These commands replace their
equivalents in the appcfg tool.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To run your app locally in the development application server, run:
$ dev_appserver.py DEPLOYABLES
To create a new deployment of one or more modules, run:
$ {command} deploy DEPLOYABLES
To list your existing deployments, run:
$ {command} modules list
To generate config files for your source directory:
$ {command} gen-config
""",
}
def Filter(self, unused_context, unused_args):
    """Hook run before any `gcloud preview app` subcommand.

    Emits the one-time surface-change warning, then persists a property so
    the warning is suppressed on later runs; also rejects interpreters the
    App Engine commands do not support.
    """
    # TODO(b/24169312): remove
    if not properties.VALUES.app.suppress_change_warning.GetBool():
        log.warn(CHANGE_WARNING)
        # Persist so the warning is shown only once per installation.
        properties.PersistProperty(properties.VALUES.app.suppress_change_warning,
                                   'true')
    if not platforms.PythonVersion().IsSupported():
        raise UnsupportedPythonVersionError(
            ('Python 2.7 or greater is required for App Engine commands in '
             'gcloud.\n\n'
             'Your Python location: [{0}]\n\n'
             'Please set the CLOUDSDK_PYTHON environment variable to point to a '
             'supported version in order to use this command.'
            ).format(sys.executable))
| [
"[email protected]"
] | |
7540ca37e20d2873725274e65d5542aadd500b00 | 2179ae6ea7da99fd58c968281f9dcdb28684214c | /re - phone pattern.py | 8be97877789ce9174ccd425bce1f01bf8f1116df | [] | no_license | Wintus/MyPythonCodes | 86d5aad9fabae0f6ed58b63741e44135bfee903e | 316d20c39c2de5960b094611c092805895037df7 | refs/heads/master | 2023-08-31T21:07:27.630010 | 2023-08-21T18:00:33 | 2023-08-21T18:00:33 | 29,950,620 | 0 | 0 | null | 2023-08-21T18:00:01 | 2015-01-28T04:32:30 | Python | UTF-8 | Python | false | false | 533 | py | phonePattern = re.compile(r'''
# don't match beginning of string, number can start anywhere
(\d{3}) # area code is 3 digits (e.g. '800')
\D* # optional separator is any number of non-digits
(\d{3}) # trunk is 3 digits (e.g. '555')
\D* # optional separator
(\d{4}) # rest of number is 4 digits (e.g. '1212')
\D* # optional separator
(\d*) # extension is optional and can be any number of digits
$ # end of string
''', re.VERBOSE)
| [
"[email protected]"
] | |
f0519d1c749d0d1a71d9290c857d4bbbd150e781 | 1374cf697c8a83ebc4bbfd3a7ad52c2003b4788e | /pyDev/src/classifier/sklearn_svm_sample.py | 47c0d34fc624541dc6b813a8912481ac07e99e07 | [] | no_license | weskita/MachineLearning | cff28a40898b89f927eae744ac0fb3b8b2a40cbd | 6806c589c75c39563c558ecf0859d5c07d826b25 | refs/heads/master | 2016-09-06T04:25:18.898717 | 2015-06-23T08:14:21 | 2015-06-23T08:14:21 | 35,151,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | # -*- coding: utf-8 -*-
"""
Sklear svm example
"""
print(__doc__)
#使用SVM 分类器
from sklearn import svm
from sklearn.datasets import load_svmlight_file
#将数据集分为训练集、检验集
from sklearn.cross_validation import train_test_split
from sklearn import cross_validation
#引入评价指标
from sklearn.metrics import confusion_matrix #计算混淆矩阵
from sklearn.metrics import matthews_corrcoef #计算MCC
from sklearn.metrics import roc_auc_score #计算MCC 只对二分类可以计算
from sklearn.metrics import accuracy_score #计算ACC
#引入数据
fr_n="/path/your_svm_file"
X,y=load_svmlight_file(fr_n)
# Run classifier
print("===cross validation===")
clf = svm.SVC(kernel='rbf')
scores=cross_validation.cross_val_score(clf,X,y,cv=5,scoring="accuracy")
print(scores,scores.mean())
print("===performance on TEST===")
# Split the data into a training set and a test set; 分为训练集 检验集
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf= svm.SVC(kernel='rbf')
clf.fit(X_train,y_train)
y_pred =clf.predict(X_test)
# 计算混淆 矩阵 Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
#计算准确率,MCC等
print("MCC: %f " %matthews_corrcoef(y_test,y_pred))
print( "ACC: %f " %accuracy_score(y_test,y_pred))
print("===compute auc ===")
#compute the auc
classifier = svm.SVC(kernel='rbf',probability=True)
model=classifier.fit(X_train,y_train)
y_prob =classifier.predict_proba(X_test)[:,1] #get the probability of positive
print(y_test)
print(y_prob)
print( "AUC: %f " %roc_auc_score(y_test,y_prob)) | [
"[email protected]"
] | |
06ea068a4936143480c9a697be9cf4c96253f1ca | de9ab2997753d77d09f34a7eab63e47a68f6bf25 | /scripts/boolean_naive_bayes.py | a69abea803bb1cbb4c410923991f9b06ef85a616 | [] | no_license | sidsvash26/kaggle_movies_sentiment | f02a8bd0b1014d65b303db6d1b4348f4a78f5b0f | 2f1039340631db92ddd5df05dada89094a80623b | refs/heads/master | 2021-01-12T17:58:36.260372 | 2016-10-03T19:19:54 | 2016-10-03T19:19:54 | 69,901,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,734 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 8 02:31:20 2016
@author: sidvash
"""
# -*- coding: utf-8 -*-
"""
Created on Thu May 5 01:46:07 2016
@author: sidvash
"""
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import math
import re
stop_words = set(stopwords.words("english"))
#Only for spyder -removes run time warning messages
import warnings
warnings.simplefilter(action = "ignore", category = RuntimeWarning)
def lower_case(string):
    """Return a lower-cased copy of *string*."""
    return string.lower()
def string_replace(s):
    """Normalize a phrase for tokenization.

    Inserts spaces at glued-together token boundaries, removes thousands
    separators, and maps punctuation to spaces.  Non-string input yields
    the literal string "null".
    """
    if not isinstance(s, str):
        return "null"
    # Separator rules, applied in order (same order as the original code).
    separator_rules = [
        (r"([a-zA-Z])\.([a-zA-Z)])", r"\1 \2"),  # '.' between letters
        (r"([0-9])([a-zA-Z])", r"\1 \2"),        # digit then letter
        (r"([a-zA-Z])([0-9])", r"\1 \2"),        # letter then digit
        (r"([a-z])([A-Z])", r"\1 \2"),           # lower/upper boundary
        (r"\,([a-zA-Z])", r" \1"),               # ',' before a letter
        (r"([a-zA-Z])\,", r"\1 "),               # ',' after a letter
        (r"([a-zA-Z])\/([a-zA-Z])", r"\1 \2"),   # '/' between letters
        (r"([a-zA-Z])\\([a-zA-Z])", r"\1 \2"),   # '\' between letters
        (r"([a-zA-Z])(\.) ", r"\1 "),            # dot after a word
        (r"([0-9])\,([0-9])", r"\1\2"),          # thousands separators
        (r"[()]", r" "),                         # parentheses
    ]
    for pattern, repl in separator_rules:
        s = re.sub(pattern, repl, s)
    # Punctuation characters that simply become spaces.
    for ch in ("-", "*", "#", ";", "$", "%", ",", ".", "/", "\\"):
        s = s.replace(ch, " ")
    # Collapse one level of double spaces (single pass, like the original).
    return re.sub(r"  ", r" ", s)
def remove_sw(string):
    """Return *string* with English stopwords removed, space-joined."""
    kept = [tok for tok in word_tokenize(string) if tok not in stop_words]
    return " ".join(kept)
def count_word(word, string):
    """Count exact occurrences of *word* among the tokens of *string*."""
    return sum(1 for tok in word_tokenize(string) if tok == word)
def count_unique_word(string):
    """Return the number of distinct tokens in *string*.

    Counts via a set in O(n); the original list-membership scan was O(n^2),
    which matters since this is run over the whole training vocabulary.
    """
    return len(set(word_tokenize(string)))
def unique_words(string):
    """Return the distinct tokens of *string* in first-occurrence order, space-joined.

    Uses a seen-set for O(n) de-duplication; the original checked membership
    in a growing list, which was O(n^2).  Output is identical.
    """
    seen = set()
    ordered = []
    for tok in word_tokenize(string):
        if tok not in seen:
            seen.add(tok)
            ordered.append(tok)
    return ' '.join(ordered)
def freq_word_dict(string):
    """Map each token of *string* to its occurrence count."""
    wordfreq = {}
    for tok in word_tokenize(string):
        wordfreq[tok] = wordfreq.get(tok, 0) + 1
    return wordfreq
# Load the Kaggle sentiment data (tab-separated Phrase/Sentiment rows).
df_train = pd.read_csv('/home/sidvash/kaggle_2016/sentiment_movies/raw_data/train.tsv', sep = '\t')
df_test = pd.read_csv('/home/sidvash/kaggle_2016/sentiment_movies/raw_data/test.tsv', sep = '\t')
df_all = pd.concat((df_train, df_test), axis=0)
num_train = df_train.shape[0]
num_test=df_test.shape[0]

# Pre-process: lower-case, then punctuation/boundary normalization.
df_all['filtered_phrase'] = df_all['Phrase'].map(lambda x: lower_case(x))
df_all['filtered_phrase'] = df_all['filtered_phrase'].map(lambda x: string_replace(x))
#df_all['filtered_phrase'] = df_all['filtered_phrase'].map(lambda x: remove_sw(x))

# For boolean naive bayes: keep each token at most once per phrase.
df_all['filtered_phrase'] = df_all['filtered_phrase'].map(lambda x: unique_words(x))

#**************** Multinomial Naive Bayes ***************

#*** Calculating Priors ***
s_class = df_train.Sentiment.unique() #array of classes

# Number of training examples in each class.
count_no_class ={ x: df_train[df_train.Sentiment == x].shape[0] for x in s_class}

# Dictionary of priors P(class).  NOTE(review): assumes Python 3 true division.
p_class = {x: count_no_class[x]/num_train for x in count_no_class}

# Concatenated text of each class (train rows only carry a Sentiment label).
text_class = {x: ' '.join(df_all[df_all.Sentiment == x].filtered_phrase) for x in s_class}

# Number of words in each class.
count_word_class = {x: len(word_tokenize(text_class[x])) for x in s_class}

# Vocabulary size of the training set (for Laplace smoothing).
train_vocab = count_unique_word(' '.join(df_all["filtered_phrase"][:num_train]))

# Per-class token frequency dictionaries.
categ_freq_word = { x: freq_word_dict(text_class[x]) for x in s_class}
def freq_word_in_categ(word, categ):
    """Return the frequency of *word* in class *categ* (0 if unseen).

    Uses a single dict.get() lookup instead of the original
    membership-test-then-index double lookup; behavior is identical.
    """
    return categ_freq_word[categ].get(word, 0)
def log_prob_string(string, categ, vocab):
    """Laplace-smoothed log10-probability of *string* under class *categ*.

    Sums log10((count(w, categ) + 1) / (words_in_categ + vocab)) over the
    tokens of *string*, then adds the class log-prior.

    Fix: the smoothing denominator now uses the *vocab* parameter.  The
    original ignored it and read the module-level `train_vocab` instead;
    the only caller (return_categ) passes train_vocab, so results are
    unchanged there.
    """
    log_prob = 0
    for tok in word_tokenize(string):
        log_prob += math.log10(freq_word_in_categ(tok, categ) + 1) \
                    - math.log10(count_word_class[categ] + vocab)
    return log_prob + math.log10(p_class[categ])
#Predict probabilities for each class
def return_categ(string):
    """Return the sentiment class with the highest posterior for *string*."""
    log_posteriors = {categ: log_prob_string(string, categ, train_vocab)
                      for categ in s_class}
    return max(log_posteriors, key=log_posteriors.get)
# Split the combined frame back into its train/test halves.
df_train = df_all[:num_train]
df_test = df_all[num_train:]

# NOTE(review): assigning to a column of these slices triggers pandas'
# SettingWithCopy warning; consider .copy() on the slices -- verify intent.
df_train['preds'] = df_train['filtered_phrase'].map(lambda x: return_categ(x))
train_accuracy = (df_train[df_train.preds == df_train.Sentiment].shape[0]) / (df_train.shape[0])

df_test['preds'] = df_test['filtered_phrase'].map(lambda x: return_categ(x))

# Kaggle submission: PhraseId plus the predicted Sentiment.
df_submit = df_test[['PhraseId', 'preds']].rename(columns = {'preds':'Sentiment'})
df_submit.to_csv('/home/sidvash/kaggle_2016/sentiment_movies/submissions/boolean_naive_bayes_incl_sw.csv', index=False)
"[email protected]"
] | |
0f2f2af378ce339b3f1b56131519c55fa8fa4a8c | 8828b0048c6c6253d0f8886530af354ffbb51df2 | /config/test/config_test.py | 3c9f98ad4a3b34c99021d7566006f59fbdaa7d0d | [] | no_license | stefan-rz/Tap-News | 9031a2bd392b3cca81bb2a3ba0b178d0810c58bb | 21c5bd1bd474fee6d06c924fd5f0046754bdc036 | refs/heads/master | 2021-06-16T11:44:11.423615 | 2017-05-22T01:07:54 | 2017-05-22T01:07:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from config import Config as cfg
def test_read():
cf = cfg().load_config_file()
assert 'localhost', cf.operations.REDIS_HOST
print 'test pass.'
def test_singleton():
    # NOTE(review): `assert borg, another_borg` always passes -- it asserts
    # the truthiness of `borg` and treats `another_borg` as the failure
    # message.  If the intent is singleton identity it should read
    # `assert borg is another_borg`; if this is the Borg (shared-state)
    # pattern, the check should compare state instead.  TODO: confirm.
    borg = cfg().load_config_file()
    another_borg = cfg().load_config_file()
    print borg is not another_borg
    print borg
    print another_borg
    assert borg, another_borg
    print 'Singleton test pass!'
    cfg()._drop()
if __name__ == "__main__":
test_read()
test_singleton()
| [
"[email protected]"
] | |
7480bda28d7c6e29cbb2bd96889c9340d1a9e221 | 80e3dfac67631cef70da3dc015d4557d46d41801 | /src/grid/changemapanimations.py | 59382ea322cc21c033792db21c18f8d56677383d | [] | no_license | snizzo/lucrezia | 7b6d648f11581ddd21acd301df5426d77b6b21dd | fb41e5ba4435d48d702c58aa9402c10ab12a51b9 | refs/heads/master | 2023-06-22T13:14:06.634116 | 2023-06-18T22:42:14 | 2023-06-18T22:42:14 | 32,765,327 | 8 | 4 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | #panda3d
from panda3d.core import NodePath, LPoint2i
from direct.showbase.DirectObject import DirectObject
from direct.interval.IntervalGlobal import *
#internals
from utils.toggle import Toggle
from utils.once import Once
from objects.grass import Grass
from objects.light import Light
from tile import Tile
from character import Character
from utils.fadeout import FadeOut
import os, sys
'''
This class abstracts the 2D grid commoly used in 2D games
to use with panda3d.
INTERNAL TILESET EXAMPLE GRAPH:
^
|
|
y |
|
|
O------------>
x
'''
class ChangeMapAnimations(DirectObject):
    '''
    Autogenerates empty tileset at start
    '''
    def __init__(self):
        # Stub: no animation state is set up yet.
        pass
| [
"[email protected]"
] | |
e6600a0f36a22c1dbdb2c5889697375206961f27 | 54a127fc963a95e5107c913b2340953bdc7c58c5 | /venv/bin/pip3.6 | c43c9adf7f43eee6a1398e4b7fdf46e402f6f315 | [] | no_license | JiyoungJeong/Term-Project | a4bc0d67c53de77beefe56c820225b9ee243ef65 | 6bfe47a586e503bbd4aa5085c9a3f4e241421be2 | refs/heads/master | 2020-04-12T19:48:21.834048 | 2018-12-21T13:49:22 | 2018-12-21T13:49:23 | 162,718,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | 6 | #!/Users/jungjiyoung/PycharmProjects/termproject/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
)
| [
"[email protected]"
] | |
3345e3d6d34fd46bbc12bc8bebb4b184beddd0d4 | c37257a50159e1c6076668c0c463eaa287d53cdc | /exercises/006/solution.py | f88de700baaec7149ca79a0646667e36c22967e8 | [] | no_license | June-VK/hackinscience | fd2b2a1f4a901ffb26e9aedd58556e7fedf76571 | 21d9bb142edd6f4b9845f9488a7e44f54baf4376 | refs/heads/master | 2020-06-14T22:28:25.108724 | 2016-12-19T14:56:06 | 2016-12-19T14:56:06 | 75,403,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | #!/usr/bin/python/
import sys
print(sys.argv[0])
| [
"[email protected]"
] | |
1321de452caf0060f6d2cf2523a3f418c5ce49c9 | 4e382ae46cf997ea2dbdfcfa463a57d3e0e9ad97 | /sols/different_ways_to_add_parentheses.py | bd10c6c02ef9f79a33afab5e46cb95900b8ae084 | [] | no_license | hayeonk/leetcode | 5136824838eb17ed2e4b7004301ba5bb1037082f | 6485f8f9b5aa198e96fbb800b058d9283a28e4e2 | refs/heads/master | 2020-04-28T03:37:16.800519 | 2019-06-01T14:34:45 | 2019-06-01T14:34:45 | 174,943,756 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | class Solution(object):
def diffWaysToCompute(self, exp):
def calculate(num1, num2, sign):
if sign == "+":
return num1 + num2
elif sign == "-":
return num1 - num2
else:
return num1 * num2
ans = []
for i in xrange(len(exp)):
if not exp[i].isdigit():
left = self.diffWaysToCompute(exp[:i])
right = self.diffWaysToCompute(exp[i+1:])
for n1 in left:
for n2 in right:
ans.append(calculate(n1, n2, exp[i]))
if not ans:
return [int(exp)]
return ans | [
"[email protected]"
] | |
9a5e3603cbec3e1f9a021bad6ebb551781f4279a | 0f950b17166ec3a327612ac8bb78fba988b54645 | /models/sinapimodels.py | aa4e21f618eeb9102e7a0584ae104266016e9d1d | [
"MIT"
] | permissive | brunotvs/projecLCQ_paper | 218326710a431fd9d4a9bc152396fa6968e7df5b | b5f7af96a0da1116095d3777a4954afae0a64939 | refs/heads/main | 2023-04-05T16:41:24.445231 | 2021-04-15T01:08:57 | 2021-04-15T01:08:57 | 358,077,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,794 | py | from sqlalchemy import (Column, Float, ForeignKey, Integer, Sequence, String,
Table, Boolean)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relationship
from sqlalchemy.schema import UniqueConstraint, Index
from sqlalchemy.sql import func
from . import Base, session
class Composition(Base):
    """A priced construction composition (work item) built from steps.

    Each composition belongs to one Group and owns a list of Steps; the
    steps attribute coefficients to the resources they consume.
    """
    __tablename__ = 'compositions'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String, nullable=False)
    # Composition code; unique across the table.
    code = Column(String(16), nullable=False, unique=True)
    units = Column(String(8), nullable=False)
    group_id = Column(Integer, ForeignKey('groups.id'), nullable=False)
    group = relationship('Group', back_populates='compositions', uselist=False)
    # Steps are owned by the composition: deleting it deletes its steps.
    steps = relationship(
        'Step',
        cascade="all, delete, delete-orphan",
        back_populates='composition'
    )
    def __init__(self, name: str, code: str, units: str, group: 'Group'):
        """Create the composition; side effect: adds itself to the session."""
        self.name = name
        self.code = code
        self.units = units
        self.group = group
        session.add(self)
    def __repr__(self):
        return f"<Composition(code={self.code})>"
    @property
    def direct_resources_attributions(self) -> 'Attribution':
        ''' Direct resources, i.e. not coming from auxiliary compositions '''
        atts = session.query(Step.attributions).filter(
            Step.composition_id == self.id,
            Step.direct == True
        ).all()
        return atts
    @property
    def grouped_attributions(self):
        # One row per resource: the attribution plus the summed coefficient
        # (step coefficient * attribution coefficient) across all steps.
        attr = session.query(
            Attribution,
            func.sum(Step.coefficient * Attribution.coefficient).label('sum_coefficient')
        ) \
            .filter(Attribution.step_id.in_([step.id for step in self.steps])) \
            .filter(Step.id == Attribution.step_id) \
            .group_by(Attribution.resource_id) \
            .all()
        return attr
    @property
    def cost(self):
        # Total cost: each step's own cost weighted by its coefficient.
        cost = sum([step.cost * step.coefficient for step in self.steps])
        return cost
class Step(Base):
    """One stage of a Composition; links resources via Attribution rows."""
    __tablename__ = 'steps'
    id = Column(Integer, primary_key=True, autoincrement=True)
    coefficient = Column(Float, nullable=False)
    name = Column(String, nullable=False)
    # True when the step consumes resources directly rather than through an
    # auxiliary composition (see Composition.direct_resources_attributions).
    direct = Column(Boolean, nullable=False, default=False)
    composition_id = Column(Integer, ForeignKey('compositions.id'), nullable=False)
    attributions = relationship(
        'Attribution', back_populates='step',
        cascade="all, delete, delete-orphan",
        foreign_keys="[Attribution.step_id]"
    )
    # Convenience proxy: the Resource objects behind the attributions.
    resources = association_proxy("attributions", "resource")
    composition = relationship('Composition', back_populates='steps', uselist=False)
    def __init__(self, name: str, coefficient: float):
        self.name = name
        self.coefficient = coefficient
    def __repr__(self):
        return f"<Step(name={self.name}, coefficient={self.coefficient})>"
    @property
    def cost(self):
        # Step cost: each resource's cost weighted by its attribution coefficient.
        cost = sum([attribution.coefficient * attribution.resource.cost for attribution in self.attributions])
        return cost
class Attribution(Base):
    """Association object tying a Step to a Resource with a coefficient."""
    __tablename__ = 'attributions'
    id = Column(Integer, primary_key=True, autoincrement=True)
    resource_id = Column(Integer, ForeignKey('resources.id'))
    step_id = Column(Integer, ForeignKey('steps.id'))
    coefficient = Column(Float, nullable=False)
    step = relationship("Step", back_populates='attributions', uselist=False)
    resource = relationship("Resource", back_populates='attributions', uselist=False)
    def __init__(self, resource: "Resource", coefficient: float):
        # The owning Step is set through the relationship, not here.
        self.resource = resource
        self.coefficient = coefficient
    def __repr__(self):
        return f"<Attribution(resource={self.resource.code}, coefficient={self.coefficient})>"
class Resource(Base):
    """A priced input (material/labour/equipment) consumed by steps.

    Each resource may carry one Impact row (environmental factors) and one
    Process row (LCA process metadata).
    """
    __tablename__ = 'resources'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String, nullable=False)
    code = Column(String(16), nullable=False, unique=True)
    units = Column(String(8), nullable=False)
    cost = Column(Float(precision=2), nullable=False)
    attributions = relationship('Attribution', back_populates='resource')
    compositions = association_proxy('attributions', 'step')
    impact = relationship('Impact', back_populates='resource', uselist=False)
    process = relationship('Process', back_populates='resource', uselist=False)
    def __init__(self, name: str, code: str, units: str, cost: float):
        """Create the resource; side effect: adds itself to the session."""
        self.name = name
        self.code = code
        self.units = units
        self.cost = cost
        session.add(self)
    def __repr__(self):
        return f"<Resource(code={self.code}, cost={self.cost})>"
class Group(Base):
    """Hierarchical grouping of compositions (self-referential tree)."""
    __tablename__ = 'groups'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String, nullable=False)
    code = Column(String(16), nullable=False, unique=True)
    parent_id = Column(Integer, ForeignKey("groups.id"))
    # remote_side=[id] marks this as the child -> parent direction of the
    # self-referential relationship.
    parent = relationship("Group", back_populates='children', uselist=False, remote_side=[id])
    children = relationship("Group", back_populates='parent')
    compositions = relationship('Composition', back_populates='group')
    def __init__(self, name: str, code: str):
        """Create the group; side effects: refreshes Group.query and adds
        the instance to the session."""
        self.name = name
        self.code = code
        self.__query__()
        session.add(self)
    @classmethod
    def __query__(cls):
        # Exposes a class-level `Group.query` helper bound to the session.
        cls.query = session.query(cls)
class Impact(Base):
    """Environmental impact factors for one resource.

    Column names correspond to life-cycle impact assessment midpoint
    categories (presumably ReCiPe 2016 — TODO confirm against the data
    source).  NOTE(review): the 'terresetrial' spelling is kept as-is;
    renaming it would change the mapped column and constructor signature.
    """
    __tablename__ = 'impacts'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Unit string the factors are expressed in (may be absent).
    units = Column(String, nullable=True)
    ozone_formation_terrestrial_ecosystems = Column(Float, nullable=False)
    water_consumption = Column(Float, nullable=False)
    marine_eutrophication = Column(Float, nullable=False)
    marine_ecotoxicity = Column(Float, nullable=False)
    land_use = Column(Float, nullable=False)
    fine_particulate_matter_formation = Column(Float, nullable=False)
    mineral_resource_scarcity = Column(Float, nullable=False)
    ionizing_radiation = Column(Float, nullable=False)
    human_non_carcinogenic_toxicity = Column(Float, nullable=False)
    freshwater_eutrophication = Column(Float, nullable=False)
    terrestrial_acidification = Column(Float, nullable=False)
    fossil_resource_scarcity = Column(Float, nullable=False)
    global_warming = Column(Float, nullable=False)
    stratospheric_ozone_depletion = Column(Float, nullable=False)
    terresetrial_ecotoxicity = Column(Float, nullable=False)
    human_carcinogenic_toxicity = Column(Float, nullable=False)
    freshwater_ecotoxicity = Column(Float, nullable=False)
    ozone_formation_human_health = Column(Float, nullable=False)
    resource_id = Column(Integer, ForeignKey("resources.id"))
    resource = relationship("Resource", back_populates='impact', uselist=False)
    def __init__(self, resource, units, ozone_formation_terrestrial_ecosystems, water_consumption,
                 marine_eutrophication, marine_ecotoxicity, land_use,
                 fine_particulate_matter_formation, mineral_resource_scarcity,
                 ionizing_radiation, human_non_carcinogenic_toxicity, freshwater_eutrophication,
                 terrestrial_acidification, fossil_resource_scarcity, global_warming,
                 stratospheric_ozone_depletion, terresetrial_ecotoxicity,
                 human_carcinogenic_toxicity, freshwater_ecotoxicity, ozone_formation_human_health):
        """Assign all impact factors for `resource` verbatim."""
        self.resource = resource
        self.units = units
        self.ozone_formation_terrestrial_ecosystems = ozone_formation_terrestrial_ecosystems
        self.water_consumption = water_consumption
        self.marine_eutrophication = marine_eutrophication
        self.marine_ecotoxicity = marine_ecotoxicity
        self.land_use = land_use
        self.fine_particulate_matter_formation = fine_particulate_matter_formation
        self.mineral_resource_scarcity = mineral_resource_scarcity
        self.ionizing_radiation = ionizing_radiation
        self.human_non_carcinogenic_toxicity = human_non_carcinogenic_toxicity
        self.freshwater_eutrophication = freshwater_eutrophication
        self.terrestrial_acidification = terrestrial_acidification
        self.fossil_resource_scarcity = fossil_resource_scarcity
        self.global_warming = global_warming
        self.stratospheric_ozone_depletion = stratospheric_ozone_depletion
        self.terresetrial_ecotoxicity = terresetrial_ecotoxicity
        self.human_carcinogenic_toxicity = human_carcinogenic_toxicity
        self.freshwater_ecotoxicity = freshwater_ecotoxicity
        self.ozone_formation_human_health = ozone_formation_human_health
class Process(Base):
    """LCA process metadata associated with one resource (free-text fields)."""
    __tablename__ = 'processess'
    id = Column(Integer, primary_key=True, autoincrement=True)
    units = Column(String, nullable=True)
    flow = Column(String, nullable=False)
    category = Column(String, nullable=False)
    uncertainty = Column(String, nullable=True)
    avoided_waste = Column(String, nullable=True)
    provider = Column(String, nullable=True)
    data_quality_entry = Column(String, nullable=True)
    description = Column(String, nullable=True)
    resource_id = Column(Integer, ForeignKey("resources.id"))
    resource = relationship("Resource", back_populates='process', uselist=False)
    def __init__(self, resource, units, flow, category, uncertainty, avoided_waste, provider, data_quality_entry, description):
        """Store the process metadata for `resource` verbatim."""
        self.resource = resource
        self.units = units
        self.flow = flow
        self.category = category
        self.uncertainty = uncertainty
        self.avoided_waste = avoided_waste
        self.provider = provider
        self.data_quality_entry = data_quality_entry
        self.description = description
"[email protected]"
] | |
aa3f4f7235ffe090bfb7628336f9b3504774ab15 | 4a4579254118db40fb008439d18ad8c573e8fc1a | /build/jsk_common_msgs/jsk_gui_msgs/cmake/jsk_gui_msgs-genmsg-context.py | f56cef5c14333e672ea233f015bc2fa7f8ca52be | [] | no_license | amilearning/AD_mpc_ws | 86ff6ef9e61c6cc5aae6e12f20c2c875b1930d41 | 1fc2d385f281e00c16aff688948f7296e02cbd3a | refs/heads/master | 2023-06-24T13:54:59.759921 | 2021-07-16T01:08:52 | 2021-07-16T01:08:52 | 386,465,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/Action.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/MagneticField.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/Touch.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/AndroidSensor.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/Gravity.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/MultiTouch.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/TouchEvent.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/DeviceSensor.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/Tablet.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/VoiceMessage.msg;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg/SlackMessage.msg"
services_str = "/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/srv/Query.srv;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/srv/YesNo.srv"
pkg_name = "jsk_gui_msgs"
dependencies_str = "std_msgs;sensor_msgs;geometry_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "jsk_gui_msgs;/home/hmcl/shared_dir/mpc_ws/src/jsk_common_msgs/jsk_gui_msgs/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;sensor_msgs;/opt/ros/melodic/share/sensor_msgs/cmake/../msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"[email protected]"
] | |
5ea65368b06c233a66fb293222f5e0e86ade3e0a | f81c9451768a52bc22c6a2abe87c25615ea8b3e6 | /汇总文件/jobboleproject/jobboleproject/spiders/crawlJobbole.py | b191639c8f7518c6e3da765558ff142ab763b436 | [] | no_license | hanfang302/crawlercollect | 07cb7fb5faf85018e82f48b0209bd86dc5c52f8f | 8f7b286df8bf0a344c3656bda5c7fb96cee640dc | refs/heads/master | 2020-03-22T05:27:07.928855 | 2018-07-03T10:26:07 | 2018-07-03T10:26:07 | 139,566,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
# Extends CrawlSpider, which itself extends the basic Spider.
class CrawljobboleSpider(CrawlSpider):
    """Rule-based crawler for blog.jobbole.com."""
    # Spider name (used to start the crawl from the CLI).
    name = 'crawlJobbole'
    # Domain whitelist: URLs outside these domains are ignored automatically.
    allowed_domains = ['blog.jobbole.com']
    # Seed URL(s) for the crawl.
    start_urls = ['http://blog.jobbole.com/']
    # LinkExtractor arguments:
    #   restrict_xpaths: only extract links found under this XPath.
    #   allow: URLs matching the regex are extracted (empty matches all).
    #   deny: URLs matching the regex are excluded.
    #   callback: method invoked with each downloaded response;
    #   follow=True keeps following links from matched pages.
    rules = (
        Rule(LinkExtractor(allow=r'.*?/item/bcjdb',deny=r'.*?/notitem/slcnd',restrict_xpaths='//div[@class=a]'), callback='parse_item', follow=True),
    )
    def parse_item(self, response):
        """Parse one matched page; currently returns an empty item dict
        (the field extractions below are left commented out)."""
        i = {}
        #i['domain_id'] = response.xpath('//input[@id="sid"]/@value').extract()
        #i['name'] = response.xpath('//div[@id="name"]').extract()
        #i['description'] = response.xpath('//div[@id="description"]').extract()
        return i
| [
"[email protected]"
] | |
fc255fe8fed8197264367180513e6fb8aebecba2 | 08b439af0eeccb93b41193b65c196b7ab2dbe773 | /award/urls.py | 1ae3b1aec217ee245e5c22ac908b89354d53b4e5 | [
"MIT"
] | permissive | EidAbdullahi/hilal | a80aa3f6b7ce2a3098f5dcada9bbb2fc52dc6ba9 | 1ccb6a1b5a5143164ced9b8a4c742997abbb6296 | refs/heads/master | 2023-05-01T14:28:46.837200 | 2021-05-05T12:02:48 | 2021-05-05T12:02:48 | 363,429,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | """award URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
    path('admin/', admin.site.urls),
    # Main site routes live in the wardapp application.
    path('',include('wardapp.urls')),
    path('tinymce/', include('tinymce.urls')),
    # One-step registration and the stock Django auth views share the
    # 'accounts/' prefix; Django dispatches to the first match.
    path('accounts/', include('django_registration.backends.one_step.urls')),
    path('accounts/', include('django.contrib.auth.urls')),
    # NOTE(review): 'register' is wired to LoginView (with the registration
    # template) — looks unintentional; confirm a registration view was meant.
    path('accounts/register/', views.LoginView.as_view(template_name='django_registration/registration_form.html')),
    path('accounts/login/', views.LoginView.as_view(template_name='registration/login.html')),
    # Log out, then redirect to the site root.
    path('accounts/logout/', views.logout_then_login, {"next_page": '/'}),
    path('ratings/', include('star_ratings.urls', namespace='ratings')),
    # DRF token endpoint: POST username/password, receive an auth token.
    path('api-token-auth', obtain_auth_token),
]
| [
"[email protected]"
] | |
5cc764c46a88da96560ed44ecd0a68781e9bf3f7 | 9e0d14ff84f104d7721b018008004f8ade5ddb70 | /venv/lib/python3.8/site-packages/google/protobuf/internal/decoder.py | 9a4e15202b2f4f6841a6841917a92b98b5229367 | [] | no_license | rowdypanda/Pandamonium | fc728af15a24d0a38c368ae2862d8d1c858e60ee | 599a26ccd6df3e298fc6fa1379f5d945d5ce4ee3 | refs/heads/master | 2023-07-19T03:49:34.171932 | 2023-07-08T08:58:15 | 2023-07-08T08:58:15 | 333,290,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/c4/5c/45/672b68707cafba78a03c05b4e0c5376c1ceb2d8d85a0a3f34f1123b992 | [
"[email protected]"
] | |
b486a84b438286c67481b13127a2048197fd09f7 | 456102b543d6e239394c91a62d26af4779ba0437 | /MGNNet/loss/__init__.py | 62a029827aa2c4901270a5582c93429994de27c2 | [] | no_license | pxssw/SLAB_Facial-Expression-Recognition | 8523f3c24584bb47fe13c4e70a23ad2ecdf7476f | f220475f7cdb9b5693c5b91e4769e7a9605cc450 | refs/heads/master | 2022-12-13T07:27:24.088863 | 2020-09-16T14:16:20 | 2020-09-16T14:16:20 | 296,046,311 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,461 | py | import os
import numpy as np
from importlib import import_module
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from .triplet import TripletLoss, TripletSemihardLoss
class Loss(nn.modules.loss._Loss):
    """Composite training loss parsed from a spec string.

    ``args.loss`` has the form ``"<weight>*<type>+<weight>*<type>..."``,
    e.g. ``"2*CrossEntropy+1*Triplet"``.  Each term is tracked separately
    in ``self.log`` (one row per ``start_log()`` call, one column per
    term, plus a final 'Total' column when there is more than one term).
    """
    def __init__(self, args, ckpt):
        super(Loss, self).__init__()
        print('[INFO] Making loss...')
        self.nGPU = args.nGPU
        self.args = args
        self.loss = []  # list of {'type', 'weight', 'function'} dicts
        self.loss_module = nn.ModuleList()
        for loss in args.loss.split('+'):
            weight, loss_type = loss.split('*')
            if loss_type == 'CrossEntropy':
                loss_function = nn.CrossEntropyLoss()
            elif loss_type == 'Triplet':
                loss_function = TripletLoss(args.margin)
            # NOTE(review): an unrecognized loss_type silently reuses the
            # previous loss_function (or raises NameError on the first term).
            self.loss.append({
                'type': loss_type,
                'weight': float(weight),
                'function': loss_function
            })
        if len(self.loss) > 1:
            # Bookkeeping-only entry for the summed loss; no module behind it.
            self.loss.append({'type': 'Total', 'weight': 0, 'function': None})
        for l in self.loss:
            if l['function'] is not None:
                print('{:.3f} * {}'.format(l['weight'], l['type']))
                self.loss_module.append(l['function'])
        self.log = torch.Tensor()  # running loss log; rows added by start_log()
        device = torch.device('cpu' if args.cpu else 'cuda')
        self.loss_module.to(device)
        if args.load != '': self.load(ckpt.dir, cpu=args.cpu)
        if not args.cpu and args.nGPU > 1:
            self.loss_module = nn.DataParallel(
                self.loss_module, range(args.nGPU)
            )
    def forward(self, outputs, labels):
        """Return the weighted sum of all configured losses.

        For the MGN model, slices of ``outputs`` feed different terms:
        outputs[1:4] go to the Triplet loss and outputs[4:] to the other
        (e.g. CrossEntropy) losses — assumes the MGN forward() produces
        outputs in that order; TODO confirm against the model code.
        """
        losses = []
        for i, l in enumerate(self.loss):
            if self.args.model == 'MGN' and l['type'] == 'Triplet':
                loss = [l['function'](output, labels) for output in outputs[1:4]]
                loss = sum(loss) / len(loss)
                effective_loss = l['weight'] * loss
                losses.append(effective_loss)
                self.log[-1, i] += effective_loss.item()
            elif self.args.model == 'MGN' and l['function'] is not None:
                loss = [l['function'](output, labels) for output in outputs[4:]]
                loss = sum(loss) / len(loss)
                effective_loss = l['weight'] * loss
                losses.append(effective_loss)
                self.log[-1, i] += effective_loss.item()
            else:
                # 'Total' entry (function is None) or non-MGN model: skipped.
                pass
        loss_sum = sum(losses)
        if len(self.loss) > 1:
            self.log[-1, -1] += loss_sum.item()
        return loss_sum
    def start_log(self):
        # Append a fresh zero row; forward() accumulates into the last row.
        self.log = torch.cat((self.log, torch.zeros(1, len(self.loss))))
    def end_log(self, batches):
        # Turn the accumulated sums of the last row into per-batch means.
        self.log[-1].div_(batches)
    def display_loss(self, batch):
        """Return a '[type: value]...' string of running means after `batch`."""
        n_samples = batch + 1
        log = []
        for l, c in zip(self.loss, self.log[-1]):
            log.append('[{}: {:.4f}]'.format(l['type'], c / n_samples))
        return ''.join(log)
    def plot_loss(self, apath, epoch):
        """Save one loss-vs-epoch plot per term as jpg files under `apath`."""
        axis = np.linspace(1, epoch, epoch)
        for i, l in enumerate(self.loss):
            label = '{} Loss'.format(l['type'])
            fig = plt.figure()
            plt.title(label)
            plt.plot(axis, self.log[:, i].numpy(), label=label)
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('Loss')
            plt.grid(True)
            plt.savefig('{}/loss_{}.jpg'.format(apath, l['type']))
            plt.close(fig)
    def step(self):
        # Advance any per-loss schedulers, if a loss module defines one.
        for l in self.get_loss_module():
            if hasattr(l, 'scheduler'):
                l.scheduler.step()
    def get_loss_module(self):
        # Unwrap the DataParallel container when several GPUs are in use.
        if self.nGPU == 1:
            return self.loss_module
        else:
            return self.loss_module.module
    def save(self, apath):
        # Persist both the module parameters and the numeric log.
        torch.save(self.state_dict(), os.path.join(apath, 'loss.pt'))
        torch.save(self.log, os.path.join(apath, 'loss_log.pt'))
    def load(self, apath, cpu=False):
        """Restore parameters and log; replay scheduler steps to catch up."""
        if cpu:
            # Remap CUDA-saved tensors onto the CPU.
            kwargs = {'map_location': lambda storage, loc: storage}
        else:
            kwargs = {}
        self.load_state_dict(torch.load(
            os.path.join(apath, 'loss.pt'),
            **kwargs
        ))
        self.log = torch.load(os.path.join(apath, 'loss_log.pt'))
        for l in self.loss_module:
            if hasattr(l, 'scheduler'):
                for _ in range(len(self.log)): l.scheduler.step()
| [
"[email protected]"
] | |
464e90b29f36fab7808df0251337c7a7dc1eb54d | 62c018ad1398317a4f18ffd5fb9f4d3ef9466fe5 | /apps/inscripciones/models.py | 959be0a27b479f6ff5bb9ec0159398a3a8837612 | [] | no_license | dgosandoval/proyectoMultiapps | ded950a09a85444b69db31e89821961ccbf8987f | eaa025451d1bac2f5ac768f33d115c03996dbda3 | refs/heads/main | 2023-04-26T01:15:57.095997 | 2021-05-25T16:19:54 | 2021-05-25T16:19:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | from __future__ import unicode_literals
from django.db import models
from apps.cursos.models import *
from apps.usuarios.models import *
class Inscripcion(models.Model):
    """Enrollment of a user (participante) in a course/workshop (taller)."""
    participante = models.ForeignKey(Usuario, related_name="inscripcionUsuario", on_delete=models.CASCADE, blank=True,
                                     null=True,
                                     default=None)
    taller = models.ForeignKey(Curso, related_name="inscripcionCurso", on_delete=models.CASCADE, blank=True,
                               null=True,
                               default=None)
    # Defaults to confirmed (1 is truthy for the BooleanField).
    confirmado = models.BooleanField(default=1)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # def __str__(self):
    #     return f"{self.alias} acceso: {self.acceso}"
| [
"[email protected]"
] | |
bff8f74051ac91f5994980953d38279ba111917d | e9b9626ffce09bf011803a33b4780d8dcc6657e8 | /class9/collateral/put_file.py | aae2559cec9e5615c13bc1c06a49886309059ceb | [
"Apache-2.0"
] | permissive | zh0u0liver/netmiko_course | f7a91eb3f543e2a609172280f13b6bc8c4fbe1b9 | 31943e4f6f66dbfe523d62d5a2f03285802a8c56 | refs/heads/master | 2023-08-04T05:41:32.745403 | 2021-09-14T02:18:44 | 2021-09-14T02:18:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | import os
from getpass import getpass
from netmiko import ConnectHandler, file_transfer
# Code so automated tests will run properly
password = os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
# Need a privilege15 account (no enable call)
cisco3 = {
"device_type": "cisco_ios",
"host": "cisco3.lasthop.io",
"username": "pyclass",
"password": password,
}
# Secure copy server must be enable on the device ('ip scp server enable')
source_file = "test2.txt"
dest_file = "test2.txt"
direction = "put"
file_system = "flash:"
ssh_conn = ConnectHandler(**cisco3)
transfer_dict = file_transfer(
ssh_conn,
source_file=source_file,
dest_file=dest_file,
file_system=file_system,
direction=direction,
overwrite_file=True, # default "will not overwrite"
inline_transfer=True,
)
ssh_conn.disconnect()
print(transfer_dict)
| [
"[email protected]"
] | |
9099a14ebb9e84852429bdb3e280f2b0e49072f1 | 0fef7c930629087ba38d2f1892f837719b34375d | /run_game.py | f976e871ddb49272e6556ac6cc47bca5577cd951 | [] | no_license | kn45/go-bang | cb3b43c77cc7c322a5786d663f617c7c0cdfb6cd | d25eecc8c77d549cd645c748411abea373d001ed | refs/heads/master | 2021-10-08T10:20:51.520825 | 2018-12-11T06:55:05 | 2018-12-11T06:55:05 | 114,105,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,890 | py | #!/usr/bin/env python
from uct import *
from game import *
from player import *
from common import cprint, pos2h
def play_game(game, p1, p2, verbose=True):
    """Play one game between p1 and p2 until it ends.

    p1 plays for player -1 and p2 for player +1 (index mapping below).
    When verbose is False all console output is suppressed via cprint.
    Returns the final game_status.
    """
    players = [p1, p2]
    gprint = cprint(verbose)  # printer that is a no-op when verbose=False
    gprint(game.board)
    while game.game_status == GameStatus.UNDERGOING:
        # game.player is -1 or +1; map it to list index 0 or 1.  Floor
        # division keeps the index an int on both Python 2 and 3 (a plain
        # '/' would produce a float index under Python 3).
        player_idx = (game.player + 1) // 2
        move, value = players[player_idx].choose_best_move(game, True)
        game.move(move)
        gprint(game.board)
        # Route through gprint so silent round-playing (verbose=False) does
        # not spam the console; previously this line printed unconditionally.
        gprint('last move: ' + str(pos2h(move, 15)))
    gprint('Res:' + str(game.game_status))
    return game.game_status
def play_gobang_rounds(p1, p2, nround=100):
    """Play nround silent GoBang games; return [p1 wins, draws, p2 wins]."""
    tally = [0, 0, 0]
    for _ in range(nround):
        outcome = play_game(GoBang(), p1, p2, verbose=False)
        # outcome is -1/0/+1; shift it into the tally index 0/1/2.
        tally[outcome + 1] += 1
    return tally
def play_tictactoe_rounds(p1, p2, nround=100):
    """Play nround silent TicTacToe games; return [p1 wins, draws, p2 wins]."""
    counts = [0] * 3
    played = 0
    while played < nround:
        # Result is -1/0/+1; shift into the counts index 0/1/2.
        counts[play_game(TicTacToe(), p1, p2, verbose=False) + 1] += 1
        played += 1
    return counts
if __name__ == '__main__':
    # Scratchpad of match-ups; uncomment a line to run that pairing.
    # play_game(GoBang(), RandomPlayer(), ManualPlayer())
    # play_game(GoBang(), RandomPlayer(), GoBangPlayer())
    # play_game(GoBang(), RandomPlayer(), UCTPlayer())
    # play_game(GoBang(), ManualPlayer(), UCTPlayer())
    # play_game(GoBang(), UCTPlayer(), UCTPlayer())
    play_game(GoBang(), UCTPlayer(), ManualPlayer())
    # print play_gobang_rounds(RandomPlayer(), GoBangPlayer(), nround=10)
    # play_game(TicTacToe(), ManualPlayer(), TicTacToePlayer())
    # play_game(TicTacToe(), RandomPlayer(), TicTacToePlayer())
    # play_game(TicTacToe(), UCTPlayer(), RandomPlayer())
    # play_game(TicTacToe(), ManualPlayer(), UCTPlayer())
    # print play_tictactoe_rounds(RandomPlayer(), TicTacToePlayer(), nround=100)
    # print play_tictactoe_rounds(UCTPlayer(), RandomPlayer(), nround=100)
| [
"[email protected]"
] | |
679e412a82c2340b28045e6cdb388a19bfb5b799 | d628948e86841ae3efc93eba2e321dd58fe33b07 | /bookmanager/settings.py | 14f4a01b82c72de936cfd48b51e0a9eebea7e085 | [] | no_license | shd0812/django_demo | 8986dde23c2fd8ae4a46f8a938c9c0924200d4b2 | 832c028171795bf6feabc39d313bcad8cfbe5b94 | refs/heads/master | 2022-12-05T18:57:11.213951 | 2020-08-24T08:15:50 | 2020-08-24T08:15:50 | 289,303,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,332 | py | """
Django settings for bookmanager project.
Generated by 'django-admin startproject' using Django 1.11.28.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY(review): this key is committed to source control; load it from an
# environment variable before deploying.
SECRET_KEY = 's6e4p%v(%&)$d(s%$l&&inwkn8)4)%&kqc-w+yssvg^)g)fe41'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project application.
    'appone.apps.ApponeConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # SECURITY(review): CSRF protection is disabled below; re-enable unless
    # there is a deliberate reason to keep it off.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookmanager.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'bookmanager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# SECURITY(review): remote MySQL credentials are hard-coded here; move them
# to environment variables or a secrets store.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'bookmanager',
        'HOST': '122.51.192.201',
        'PORT': 3306,
        'USER': 'root',
        'PASSWORD': '123'
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
| [
"[email protected]"
] | |
ab9a23317b1997f28cfa6c1d816d647193b2be04 | ddf55f8e6d23dee073865ae2f37d943643a342ac | /src/meltano/core/permissions/pg_spec_loader.py | d7b8a1cc004c1116b1a44e3f4ac0940513ccb1d9 | [
"MIT"
] | permissive | NikolayS/meltano | d7ac7c1355acd907c71c601ecfb3b965ce8448a0 | 4616d44ded9dff4e9ad19a9004349e9baa16ddd5 | refs/heads/master | 2023-01-05T22:55:15.423831 | 2019-12-09T07:51:09 | 2019-12-09T07:51:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,109 | py | import cerberus
import logging
import yaml
from typing import Dict, List
from meltano.core.permissions.utils.error import SpecLoadingError
from meltano.core.permissions.spec_schemas.postgres import *
VALIDATION_ERR_MSG = 'Spec error: Role "{}", field "{}": {}'
class PGSpecLoader:
    def __init__(self, spec_path: str) -> None:
        # Load and validate eagerly; raises SpecLoadingError on any failure.
        self.spec = self.load_spec(spec_path)
def load_spec(self, spec_path: str) -> Dict:
"""
Load a permissions specification from a file.
If the file is not found or at least an error is found during validation,
raise a SpecLoadingError with the appropriate error messages.
Otherwise, return the valid specification as a Dictionary to be used
in other operations.
"""
try:
with open(spec_path, "r") as stream:
spec = yaml.load(stream)
except FileNotFoundError:
raise SpecLoadingError(f"Spec File {spec_path} not found")
error_messages = self.ensure_valid_schema(spec)
if error_messages:
raise SpecLoadingError("\n".join(error_messages))
return spec
def ensure_valid_schema(self, spec: Dict) -> List[str]:
"""
Ensure that the provided spec has no schema errors.
Return a list with all the errors found.
"""
error_messages = []
schema = yaml.load(PG_SPEC_SCHEMA_YAML)
v = cerberus.Validator(schema)
role_configs = [(role, config) for role, config in spec.items() if config]
for role, config in role_configs:
v.validate(config)
for field, err_msg in v.errors.items():
error_messages.append(
VALIDATION_ERR_MSG.format(role, field, err_msg[0])
)
return error_messages
def generate_permission_queries(self) -> List[str]:
sql_commands = []
role_configs = [(role, config) for role, config in self.spec.items() if config]
for role, config in role_configs:
sql_commands.extend(self.generate_alter_role(role, config))
sql_commands.extend(self.generate_grant_roles_to_role(role, config))
sql_commands.extend(self.generate_grant_ownership_to_role(role, config))
sql_commands.extend(self.generate_grant_privileges_to_role(role, config))
return sql_commands
def generate_alter_role(self, role: str, config: Dict) -> List[str]:
ALTER_ROLE_TEMPLATE = "ALTER ROLE {role} {privileges}"
sql_commands = []
alter_privileges = []
try:
if config["can_login"]:
alter_privileges.append("LOGIN")
else:
alter_privileges.append("NOLOGIN")
except KeyError:
logging.debug(
"`can_login` not found for {}, skipping login rules.".format(role)
)
try:
if config["is_superuser"]:
alter_privileges.append("SUPERUSER")
else:
alter_privileges.append("NOSUPERUSER")
except KeyError:
logging.debug(
"`is_superuser` not found for {}, skipping superuser rules.".format(
role
)
)
if alter_privileges:
sql_commands.append(
{
"already_granted": False,
"sql": ALTER_ROLE_TEMPLATE.format(
role=role, privileges=" ".join(alter_privileges)
),
}
)
return sql_commands
def generate_grant_roles_to_role(self, role: str, config: Dict) -> List[str]:
GRANT_ROLE_TEMPLATE = "GRANT {role_names} TO {role}"
sql_commands = []
role_names = config.get("member_of", [])
if role_names:
sql_commands.append(
{
"already_granted": False,
"sql": GRANT_ROLE_TEMPLATE.format(
role=role, role_names=", ".join(role_names)
),
}
)
return sql_commands
def generate_grant_ownership_to_role(self, role: str, config: Dict) -> List[str]:
ALTER_SCHEMA_OWNER_TEMPLATE = "ALTER SCHEMA {schema} OWNER TO {role}"
sql_commands = []
try:
for schema in config["owns"]["schemas"]:
sql_commands.append(
{
"already_granted": False,
"sql": ALTER_SCHEMA_OWNER_TEMPLATE.format(
role=role, schema=schema
),
}
)
except KeyError:
logging.debug(
"`owns.schemas` not found for {}, skipping OWNership rules.".format(
role
)
)
return sql_commands
def generate_grant_privileges_to_role(self, role: str, config: Dict) -> List[str]:
GRANT_READ_ON_SCHEMA_TEMPLATE = "GRANT USAGE ON SCHEMA {schema} TO {role}"
GRANT_WRITE_ON_SCHEMA_TEMPLATE = "GRANT CREATE ON SCHEMA {schema} TO {role}"
GRANT_READ_ON_TABLE_TEMPLATE = "GRANT SELECT ON TABLE {table} TO {role}"
GRANT_WRITE_ON_TABLE_TEMPLATE = (
"GRANT SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER "
"ON TABLE {table} TO {role}"
)
GRANT_READ_ON_ALL_TABLES_TEMPLATE = (
"GRANT SELECT ON ALL TABLES IN SCHEMA {schema} TO {role}"
)
GRANT_WRITE_ON_ALL_TABLE_TEMPLATE = (
"GRANT SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER "
"ON ALL TABLES IN SCHEMA {schema} TO {role}"
)
sql_commands = []
try:
for schema in config["privileges"]["schemas"]["read"]:
sql_commands.append(
{
"already_granted": False,
"sql": GRANT_READ_ON_SCHEMA_TEMPLATE.format(
role=role, schema=schema
),
}
)
except KeyError:
logging.debug(
"`privileges.schemas.read` not found for {}, skipping Schema Read GRANTS.".format(
role
)
)
try:
for schema in config["privileges"]["schemas"]["write"]:
sql_commands.append(
{
"already_granted": False,
"sql": GRANT_WRITE_ON_SCHEMA_TEMPLATE.format(
role=role, schema=schema
),
}
)
except KeyError:
logging.debug(
"`privileges.schemas.write` not found for {}, skipping Schema Write GRANTS.".format(
role
)
)
try:
for table in config["privileges"]["tables"]["read"]:
if table.endswith(".*"):
schema = table[:-2]
sql_commands.append(
{
"already_granted": False,
"sql": GRANT_READ_ON_ALL_TABLES_TEMPLATE.format(
role=role, schema=schema
),
}
)
else:
sql_commands.append(
{
"already_granted": False,
"sql": GRANT_READ_ON_TABLE_TEMPLATE.format(
role=role, table=table
),
}
)
except KeyError:
logging.debug(
"`privileges.tables.read` not found for {}, skipping Table Read GRANTS.".format(
role
)
)
try:
for table in config["privileges"]["tables"]["write"]:
if table.endswith(".*"):
schema = table[:-2]
sql_commands.append(
{
"already_granted": False,
"sql": GRANT_WRITE_ON_ALL_TABLE_TEMPLATE.format(
role=role, schema=schema
),
}
)
else:
sql_commands.append(
{
"already_granted": False,
"sql": GRANT_WRITE_ON_TABLE_TEMPLATE.format(
role=role, table=table
),
}
)
except KeyError:
logging.debug(
"`privileges.tables.write` not found for {}, skipping Table Write GRANTS.".format(
role
)
)
return sql_commands
| [
"[email protected]"
] | |
098d0a2a8b1145a3df6d306fd83f6c68df598e98 | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /cv/distiller/CWD/pytorch/mmrazor/configs/nas/mmcls/onceforall/ofa_mobilenet_supernet_32xb64_in1k.py | 341f4bda969cdd7625e1da7e3e5ff0c36e6fee57 | [
"Apache-2.0"
] | permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 1,671 | py | _base_ = [
'mmcls::_base_/default_runtime.py',
'mmrazor::_base_/settings/imagenet_bs2048_ofa.py',
'mmrazor::_base_/nas_backbones/ofa_mobilenetv3_supernet.py',
]
# Supernet: a searchable image classifier built on the OFA MobileNetV3
# backbone pulled in via _base_ (see the config list above).
supernet = dict(
    _scope_='mmrazor',
    type='SearchableImageClassifier',
    data_preprocessor=_base_.data_preprocessor,
    backbone=_base_.nas_backbone,
    neck=dict(type='mmcls.GlobalAveragePooling'),
    head=dict(
        type='DynamicLinearClsHead',
        num_classes=1000,  # ImageNet-1k
        in_channels=1280,
        loss=dict(
            type='mmcls.LabelSmoothLoss',
            num_classes=1000,
            label_smooth_val=0.1,
            mode='original',
            loss_weight=1.0),
        topk=(1, 5)),
    input_resizer_cfg=_base_.input_resizer_cfg,
    connect_head=dict(connect_with_backbone='backbone.last_mutable_channels'),
)
# BigNAS-style training wrapper: the supernet is self-distilled, with the
# largest subnet acting as teacher (KL divergence on the fc logits).
model = dict(
    _scope_='mmrazor',
    type='BigNAS',
    drop_path_rate=0.2,
    backbone_dropout_stages=[6, 7],
    architecture=supernet,
    distiller=dict(
        type='ConfigurableDistiller',
        # Teacher and student both record the same head output.
        teacher_recorders=dict(
            fc=dict(type='ModuleOutputs', source='head.fc')),
        student_recorders=dict(
            fc=dict(type='ModuleOutputs', source='head.fc')),
        distill_losses=dict(
            loss_kl=dict(type='KLDivergence', tau=1, loss_weight=1)),
        loss_forward_mappings=dict(
            loss_kl=dict(
                preds_S=dict(recorder='fc', from_student=True),
                preds_T=dict(recorder='fc', from_student=False)))),
    mutators=dict(type='mmrazor.NasMutator'))
# Distributed-training wrapper for the sandwich-rule forward passes.
model_wrapper_cfg = dict(
    type='mmrazor.BigNASDDP',
    broadcast_buffers=False,
    find_unused_parameters=True)
| [
"[email protected]"
] | |
7baea0a1689b386455cd0b163b447de69f0e447d | d632f1b850a389e6b011ec1b72bcfdc6801388a7 | /loss.py | 81db2127b41c7ceaca06e5cd69efd316ab29ebcb | [] | no_license | xkeys/CE-Net | 6539abf1728d6cd8b4332d890370add7d4da6865 | 4507604257801358a0a70d1d436cdeb165a60afc | refs/heads/master | 2020-05-01T16:20:58.230432 | 2019-03-17T17:22:10 | 2019-03-17T17:22:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,219 | py | import torch
import torch.nn as nn
from torch.autograd import Variable as V
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import cv2
import numpy as np
class weighted_cross_entropy(nn.Module):
def __init__(self, num_classes=12, batch=True):
super(weighted_cross_entropy, self).__init__()
self.batch = batch
self.weight = torch.Tensor([52.] * num_classes).cuda()
self.ce_loss = nn.CrossEntropyLoss(weight=self.weight)
def __call__(self, y_true, y_pred):
y_ce_true = y_true.squeeze(dim=1).long()
a = self.ce_loss(y_pred, y_ce_true)
return a
class dice_loss(nn.Module):
    """Soft Dice loss (1 - Dice coefficient) with no smoothing term.

    With ``batch=True`` the coefficient is computed over the whole batch at
    once; otherwise it is computed per sample (summing dims 1..3) and then
    averaged.
    """
    def __init__(self, batch=True):
        super(dice_loss, self).__init__()
        self.batch = batch
    def soft_dice_coeff(self, y_true, y_pred):
        smooth = 0.0  # kept at zero; may change
        if self.batch:
            denom_true = y_true.sum()
            denom_pred = y_pred.sum()
            overlap = (y_true * y_pred).sum()
        else:
            denom_true = y_true.sum(1).sum(1).sum(1)
            denom_pred = y_pred.sum(1).sum(1).sum(1)
            overlap = (y_true * y_pred).sum(1).sum(1).sum(1)
        dice = (2. * overlap + smooth) / (denom_true + denom_pred + smooth)
        return dice.mean()
    def soft_dice_loss(self, y_true, y_pred):
        return 1 - self.soft_dice_coeff(y_true, y_pred)
    def __call__(self, y_true, y_pred):
        return self.soft_dice_loss(y_true, y_pred)
def test_weight_cross_entropy():
    """Smoke-test weighted_cross_entropy on random 12-class, 128x128 data.

    NOTE(review): relies on the sibling weighted_cross_entropy class, whose
    constructor calls .cuda(), so this only runs on a CUDA machine.
    """
    N = 4
    C = 12
    H, W = 128, 128
    inputs = torch.rand(N, C, H, W)
    targets = torch.LongTensor(N, H, W).random_(C)
    # Variable is a no-op wrapper on modern PyTorch; kept for compatibility.
    inputs_fl = Variable(inputs.clone(), requires_grad=True)
    targets_fl = Variable(targets.clone())
    print(weighted_cross_entropy()(targets_fl, inputs_fl))
class dice_bce_loss(nn.Module):
    """Multi-class segmentation loss.

    Despite the name, ``__call__`` currently returns only the cross-entropy
    term; the dice term (and the unused BCE module) is commented out below.
    NOTE(review): multi_class_one_hot allocates torch.cuda tensors, so this
    class requires a CUDA device.
    """
    def __init__(self, batch=True):
        super(dice_bce_loss, self).__init__()
        self.batch = batch
        self.bce_loss = nn.BCELoss()  # NOTE(review): never used below
        self.ce_loss = nn.CrossEntropyLoss()
        self.softmax = torch.nn.Softmax(dim=1)
    def multi_class_one_hot(self, label, classes):
        """One-hot encode (N, 1, H, W) integer labels to (N, classes, H, W)."""
        N, H, W = label.size(0), label.size(2), label.size(3)
        # y_hot = torch.LongTensor(N, classes, H, W).cuda()
        y_hot = torch.cuda.FloatTensor(N, classes, H, W)
        y_hot.zero_()
        # y_hot = y_hot.type(torch.cuda.LongTensor)
        label = label.type(torch.cuda.LongTensor)
        # Scatter a 1 into the channel named by each label value.
        y_hot.scatter_(1, label.view(N, 1, H, W), 1)
        return y_hot
    def multi_class_dice_loss(self, input, mask):
        """Per-class soft dice loss over (N, C, H, W) tensors, averaged."""
        assert input.size() == mask.size(), "Input sizes must be equal to mask, the input size is {}, and the" \
                                            "mask size is {}".format(input.size(), mask.size())
        # print(input.size())
        # print(mask.size())
        assert input.dim() == 4, "Input must be a 4D tensor"
        num = (input * mask).sum(dim=3).sum(dim=2)
        den1 = input.pow(2)
        den2 = mask.pow(2)
        dice = 2 * (num / (den1 + den2).sum(dim=3).sum(dim=2))
        return 1. - dice.sum() / (dice.size(1) * dice.size(0))
    def soft_dice_coeff(self, y_true, y_pred):
        """Dice coefficient, batch-wide or per-sample depending on self.batch."""
        smooth = 0.0  # may change
        if self.batch:
            i = torch.sum(y_true)
            j = torch.sum(y_pred)
            intersection = torch.sum(y_true * y_pred)
        else:
            i = y_true.sum(1).sum(1).sum(1)
            j = y_pred.sum(1).sum(1).sum(1)
            intersection = (y_true * y_pred).sum(1).sum(1).sum(1)
        score = (2. * intersection + smooth) / (i + j + smooth)
        #score = (intersection + smooth) / (i + j - intersection + smooth)#iou
        return score.mean()
    def soft_dice_loss(self, y_true, y_pred):
        loss = 1 - self.soft_dice_coeff(y_true, y_pred)
        return loss
    def __call__(self, y_true, y_pred):
        # NOTE(review): y_prediction and y_mask_one_hot are computed but
        # unused since the dice term is commented out below.
        y_prediction = self.softmax(y_pred)
        # print("=====================")
        # print(y_true.size(),y_pred.size(),y_prediction.size())
        # print("=====================")
        y_mask_one_hot = self.multi_class_one_hot(y_true, classes=12)
        y_ce_true = y_true.squeeze(dim=1).long()
        # print(y_prediction.size())
        # print(y_mask_one_hot.size())
        a = self.ce_loss(y_pred, y_ce_true)
        # b = self.multi_class_dice_loss(y_prediction, y_mask_one_hot)
        return a
import torch
import torch.nn as nn
class DiceLoss(nn.Module):
    """Soft Dice loss with Laplace smoothing.

    Returns ``1 - mean_n dice_n`` where, per sample n,
    ``dice = (2*|A.B| + smooth) / (|A| + |B| + smooth)``.

    Fix: the smoothing constant used to sit inside the doubled numerator,
    ``2 * (intersection + smooth)``, so a perfect match gave a "coefficient"
    of 4/3 and a negative loss. The standard formulation adds ``smooth``
    once to the numerator, so identical inputs (including two empty masks)
    now yield a loss of exactly 0.
    Also generalized: inputs only need a leading batch dimension, not a
    strict (N, C, H, W) shape (the old H/W reads were unused).
    """
    def __init__(self):
        super(DiceLoss, self).__init__()
    def forward(self, input, target):
        """input, target: same-shape (N, ...) float tensors; returns a scalar."""
        N = target.size(0)
        smooth = 1
        input_flat = input.view(N, -1)
        target_flat = target.view(N, -1)
        intersection = input_flat * target_flat
        # Per-sample smoothed Dice coefficient.
        dice = (2 * intersection.sum(1) + smooth) / (
            input_flat.sum(1) + target_flat.sum(1) + smooth)
        return 1 - dice.sum() / N
class MulticlassDiceLoss(nn.Module):
    """Applies DiceLoss to each class channel and sums the per-class losses.

    Expects a one-hot encoded target of shape (N, C, H, W) matching the
    input; the optional ``weights`` sequence (length C) scales each class's
    contribution.
    """
    def __init__(self):
        super(MulticlassDiceLoss, self).__init__()
    def forward(self, input, target, weights=None):
        num_classes = target.shape[1]
        # if weights is None:
        #     weights = torch.ones(num_classes)  # uniform weights for all classes
        dice = DiceLoss()
        total = 0
        for c in range(num_classes):
            class_loss = dice(input[:, c, :, :], target[:, c, :, :])
            if weights is not None:
                class_loss *= weights[c]
            total += class_loss
        return total
class FocalLoss(nn.Module):
    """Focal loss (Lin et al.) for dense classification.

    ``gamma`` down-weights easy examples; ``alpha`` is an optional per-class
    balance (a scalar becomes the pair [alpha, 1-alpha]).
    NOTE(review): ``forward`` takes (target, input) — reversed from the
    usual (input, target) convention; callers must match this order.
    """
    def __init__(self, gamma=0, alpha=None, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        if isinstance(alpha,(float,int)): self.alpha = torch.Tensor([alpha,1-alpha])
        if isinstance(alpha,list): self.alpha = torch.Tensor(alpha)
        self.size_average = size_average
    def forward(self, target, input):
        # Drop the singleton channel dim from (N, 1, ...) targets.
        target1 = torch.squeeze(target, dim=1)
        if input.dim()>2:
            input = input.view(input.size(0),input.size(1),-1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1,2)    # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1,input.size(2))   # N,H*W,C => N*H*W,C
        target2 = target1.view(-1,1).long()
        logpt = F.log_softmax(input, dim=1)
        # print(logpt.size())
        # print(target2.size())
        # Pick the log-probability of each example's true class.
        logpt = logpt.gather(1,target2)
        logpt = logpt.view(-1)
        pt = Variable(logpt.data.exp())
        if self.alpha is not None:
            if self.alpha.type()!=input.data.type():
                self.alpha = self.alpha.type_as(input.data)
            # NOTE(review): gathers with the original `target`, not the
            # squeezed/flattened `target2` — verify this is intended.
            at = self.alpha.gather(0,target.data.view(-1))
            logpt = logpt * Variable(at)
        # Focal modulation: (1 - p_t)^gamma scales the NLL term.
        loss = -1 * (1-pt)**self.gamma * logpt
        if self.size_average: return loss.mean()
        else: return loss.sum() | [
"[email protected]"
] | |
515604f2abc3e0df4872c067064277872c874543 | 6f65ebe31650b73e9c5e77d598295eb1362702bd | /tools/Polygraphy/tests/logger/test_logger.py | 078390e71a50b887380cad96436dc31ad37a6be2 | [
"Apache-2.0",
"BSD-3-Clause",
"ISC",
"BSD-2-Clause",
"MIT"
] | permissive | hierarchyJK/TensorRT | d9b5be9964e54af8b2789a6e98f393519956ed90 | c2668947ea9ba4c73eb1182c162101f09ff250fd | refs/heads/master | 2023-06-26T07:01:08.922681 | 2021-07-12T09:28:23 | 2021-07-13T20:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
from polygraphy.logger.logger import Logger
# We don't use the global logger here because we would have to reset the state each time.
class TestLogger(object):
    """Tests for polygraphy Logger file-logging support."""
    def test_log_file(self):
        # Point a fresh logger at a temp file, emit one message, and check
        # both the property round-trip and the severity-prefixed line on disk.
        logger = Logger()
        with tempfile.NamedTemporaryFile("w+") as log_file:
            logger.log_file = log_file.name
            assert logger.log_file == log_file.name
            logger.info("Hello")
            log_file.seek(0)
            assert log_file.read() == "[I] Hello\n"
| [
"[email protected]"
] | |
bcc53869bc52dae0e85fc0da278dc4dc84dde550 | 17238532f6d8ced670ab95da3792d12368373d60 | /get_toll_to_ljt_0828.py | a098e09ba31d5a5ed2d8843add299a6b2331e938 | [] | no_license | ljt55119922/lygh | 4b3b4262ecf0bea576a2e20f6ef384d7736bcd68 | 707f14a26736b77b720790f756939ce577d55dba | refs/heads/master | 2020-03-29T18:56:15.993829 | 2018-09-25T09:20:53 | 2018-09-25T09:20:53 | 150,239,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,432 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename: get_toll_to_ljt_0828
# Author : yuanzhou
# Date : 2018/8/28
# Description:
import requests
import pandas as pd
import numpy as np
import json
import matplotlib.pyplot as plt
import time
import functools
import os
from lonlat2adinfo_new2 import main_func_lonlat2ad, main_func_batch_True, get_boundary_data_by_file
from provs_toll_rules import provs_toll_rules
plt.rcParams['font.family'] = ['FangSong_GB2312']
def truck_type(truck_weight):
    """Map a truck's gross weight (tonnes) onto toll classes 1-5.

    Class boundaries (half-open): <2, [2,5), [5,10), [10,15), >=15.
    """
    upper_bounds = (2, 5, 10, 15)
    for toll_class, bound in enumerate(upper_bounds, start=1):
        if truck_weight < bound:
            return toll_class
    return 5
def oil_expend(truck_weight, prov, road_level):
    """Estimate fuel consumption (per 100 km) for a truck.

    Baseline is 25 below 15 t, grows by 0.5 per extra tonne from 15 t, and
    is capped at 42. Guizhou/Yunnan provinces add 10%; non-toll roads
    (road_level == 0) add a further 15%.
    """
    if truck_weight < 15:
        rate = 25
    else:
        rate = min((truck_weight - 15) * 0.5 + 25, 42)
    if prov in ('贵州省', '云南省'):
        rate *= 1.1
    if road_level == 0:
        rate *= 1.15
    return rate
def get_oil_price():
    """Load station diesel prices from the spreadsheet.

    Returns (city_mean, prov_mean): two DataFrames with columns
    ['市'|'省', '企业采购价'], where the price column is the NaN-ignoring
    mean purchase price per city / per province.
    """
    stations = pd.read_excel('在线油站&价格(第二版).xlsx')
    city_mean = stations.groupby(['市'])['企业采购价'].apply(np.nanmean).reset_index()
    prov_mean = stations.groupby(['省'])['企业采购价'].apply(np.nanmean).reset_index()
    return city_mean, prov_mean
def ETC_for_truck(start_GPS, end_GPS, truck_weight, axle=None, wheels=None, front_length=None):
    """
    Estimate toll (ETC), travel time, distance and diesel cost for every
    candidate route between two points, using the AMap driving API plus the
    per-province toll rules.

    :param start_GPS: tuple (start_lon, start_lat)
    :param end_GPS: tuple (end_lon, end_lat)
    :param truck_weight: gross weight in tonnes (truncated to int)
    :param axle: axle count, forwarded to provs_toll_rules
    :param wheels: wheel count, forwarded to provs_toll_rules
    :param front_length: forwarded to provs_toll_rules
    :return: dict {route_index: {'length', 'time', 'ETC', 'diesel_spend',
        'diesel_price'}}; returns None when the API reports failure
    """
    truck_weight = int(truck_weight)
    start_x, start_y = start_GPS
    end_x, end_y = end_GPS
    # NOTE(review): hard-coded API key; should be moved to configuration.
    key = 'a0402bea88287c7ee36020fabd995539'
    url = 'https://restapi.amap.com/v3/direction/driving?' \
          'key=%s&' \
          'origin=%s,%s&' \
          'destination=%s,%s&' \
          'originid=&' \
          'destinationid=&' \
          'extensions=base&' \
          'strategy=10&' \
          'waypoints=&' \
          'avoidpolygons=&' \
          'avoidroad=' % (key, start_x, start_y, end_x, end_y)
    html = requests.get(url)
    # Fix: parse the response as JSON instead of eval()'ing it -- eval on
    # remote data is unsafe and breaks on JSON literals (true/false/null).
    data = json.loads(str(html.content, 'utf-8'))
    if data['status'] == '0':
        print('Oops! Maybe the key is wrong')
    else:
        n_lines = len(data['route']['paths'])
        result = {}
        # Fuel price tables (per city and per province).
        oil_city_price, oil_prov_price = get_oil_price()
        # Evaluate each recommended route independently.
        for l in range(n_lines):
            temp = data['route']['paths'][l]['steps']
            df = pd.DataFrame(data=temp)
            # Use the last polyline point of each step to locate the step.
            df['last_point'] = df.polyline.apply(lambda x: x.split(';')[-1])
            df['last_point_tuple'] = df.last_point.apply(lambda x: tuple([float(i) for i in x.split(',')]))
            point_in = df['last_point_tuple'].tolist()
            temp_result = main_func_lonlat2ad(point_in, level_mode='city', batch=True)
            df['curr_prov'] = [i[-2] for i in temp_result]
            df['curr_city'] = [i[-1] for i in temp_result]
            # Per-province toll multiplier for this truck on each step.
            price_times = df.apply(lambda x: provs_toll_rules(x.curr_prov, truck_weight, road_name=x.road, axle=axle,
                                                              wheels=wheels, front_length=front_length)[0], axis=1)
            temp = df.copy()
            temp['price_times'] = price_times
            temp['tolls'] = temp['tolls'].astype(float)
            price_temp = int(np.sum(temp['tolls'] * temp['price_times']))
            temp.distance = temp.distance.astype(int)
            # Assume 70 km/h on toll roads, 35 km/h otherwise.
            time_temp = np.sum(temp.distance[temp.tolls > 0]) / 1000 / 70 + \
                        np.sum(temp.distance[temp.tolls == 0]) / 1000 / 35
            length_temp = np.round(np.sum(temp.distance) / 1000, decimals=2)
            # Baseline consumption per 100 km for each step.
            temp['oil_standard'] = temp.apply(lambda x: oil_expend(truck_weight, x.curr_prov, x.tolls), axis=1)
            # Fuel burned on each step.
            temp['oil_spend'] = temp['distance'] / (1000 * 100) * temp['oil_standard']
            oil_spend_temp = np.round(np.sum(temp['oil_spend']), decimals=2)
            # Fuel price lookup: prefer a city-level price, fall back to
            # the province-level mean.
            price_dict = {}
            temp['curr_city'] = temp['curr_city'].astype(str)
            temp['curr_prov'] = temp['curr_prov'].astype(str)
            cities_temp = list(temp['curr_city'][temp['curr_city'] != '[]'].unique())
            for j in cities_temp:
                index_city = oil_city_price.apply(lambda x: True if j[:2] in x.市 else False, axis=1)
                if np.sum(index_city) == 0:
                    continue
                else:
                    p = oil_city_price.loc[index_city, '企业采购价'].values[0]
                    price_dict[j] = np.round(p, decimals=2)
            provs_temp = list(temp['curr_prov'][temp['curr_prov'].apply(lambda x: len(x)) != 0].unique())
            for j in provs_temp:
                index_city = oil_prov_price.apply(lambda x: True if j[:2] in x.省 else False, axis=1)
                if np.sum(index_city) == 0:
                    continue
                else:
                    p = oil_prov_price.loc[index_city, '企业采购价'].values[0]
                    price_dict[j] = np.round(p, decimals=2)
            temp['oil_city'] = temp['curr_city'].map(price_dict)
            temp['oil_prov'] = temp['curr_prov'].map(price_dict)
            temp['oil_final'] = temp.apply(lambda x: x.oil_prov if np.isnan(x.oil_city) else x.oil_city, axis=1)
            oil_spend_price_temp = np.round(np.sum(temp['oil_final'] * temp['oil_spend']))
            result[l] = {'length': length_temp, 'time': time_temp, 'ETC': price_temp, 'diesel_spend': oil_spend_temp,
                         'diesel_price': oil_spend_price_temp}
        return result
if __name__ == '__main__':
    # Fixed truck profile used for every sample route below.
    truck_weight = 49
    axle = 3
    wheels = 12
    front_length = 2.6
    # (label, start lon/lat, end lon/lat) for each origin-destination pair.
    routes = [
        ('东莞-娄底', (113.746262, 23.046237), (112.008497, 27.728136)),
        ('东莞-昆明', (113.746262, 23.046237), (102.833722, 24.881539)),
        ('南宁-东莞', (108.36637, 22.817746), (113.746262, 23.046237)),
        ('南宁-成都', (108.36637, 22.817746), (104.066143, 30.573095)),
        ('南宁-贵阳', (108.36637, 22.817746), (106.630153, 26.647661)),
        ('襄阳-南宁', (112.144146, 32.042426), (108.36637, 22.817746)),
        ('襄阳-宜昌', (112.144146, 32.042426), (111.286451, 30.69187)),
        ('长沙-宜昌', (112.938888, 28.228272), (111.286451, 30.69187)),
    ]
    for label, start_GPS, end_GPS in routes:
        result = ETC_for_truck(start_GPS, end_GPS, truck_weight, axle=axle,
                               wheels=wheels, front_length=front_length)
        print(label + ': ', result)
"[email protected]"
] | |
4e8f3e9f23a13541a250bd396e11056ccbf3020b | 01ab44dc95fc7e7b058f428f3b64e9c9b775fd0d | /setup.py | 350f86d96c3e9e0b27ec41324991fc8fbf77383c | [
"MIT"
] | permissive | hinnefe2/poketrainer | 02a3a45db9627c3e40955f965c5e8ca8bc415b49 | a34e49919f9435e135a5a2775f5253207222e98c | refs/heads/master | 2023-08-14T06:05:13.426754 | 2023-05-23T18:29:11 | 2023-05-23T18:29:11 | 225,231,403 | 0 | 0 | MIT | 2023-07-25T20:49:05 | 2019-12-01T21:10:00 | Python | UTF-8 | Python | false | false | 307 | py | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
setup(
name='poketrainer',
version='0.1.0',
description='Webapp for gamifying personal training with pokemon',
url='https://pokefitbit.herokuapp.com/',
packages=find_packages(),
)
| [
"[email protected]"
] | |
e05cc2b30e6bd80effae021dbc1b2b27b0d6c87b | 5d46c3c2cadf8d89668eccfe1fae51c2231e2962 | /dyn/tm/accounts.py | 75503d5610859ae96fab2bb91274b25043401cee | [] | no_license | Blueprint-Marketing/dyn-python | b740188dcb53380d4d0f01aa02384f5e2f8ea517 | f833b39f669f067de9d85d7771699df609ee231b | refs/heads/master | 2021-01-18T08:55:10.710366 | 2016-01-28T20:19:22 | 2016-01-28T20:19:22 | 50,611,641 | 1 | 0 | null | 2016-01-28T20:32:17 | 2016-01-28T20:32:16 | null | UTF-8 | Python | false | false | 58,426 | py | # -*- coding: utf-8 -*-
"""This module contains interfaces for all Account management features of the
REST API
"""
import logging
from .errors import DynectInvalidArgumentError
from .session import DynectSession
from ..compat import force_unicode
__author__ = 'jnappi'
__all__ = ['get_updateusers', 'get_users', 'get_permissions_groups',
'get_contacts', 'get_notifiers', 'UpdateUser', 'User',
'PermissionsGroup', 'UserZone', 'Notifier', 'Contact']
def get_updateusers(search=None):
    """Return a ``list`` of :class:`~dyn.tm.accounts.UpdateUser` objects. If
    *search* is specified, then only :class:`~dyn.tm.accounts.UpdateUsers` who
    match all of those search criteria will be returned in the list.
    Otherwise, all :class:`~dyn.tm.accounts.UpdateUsers`'s will be returned.

    :param search: A ``dict`` of search criteria. Key's in this ``dict`` much
        map to an attribute a :class:`~dyn.tm.accounts.UpdateUsers` instance
        and the value mapped to by that key will be used as the search criteria
        for that key when searching.
    :return: a ``list`` of :class:`~dyn.tm.accounts.UpdateUser` objects
    """
    uri = '/UpdateUser/'
    api_args = {'detail': 'Y'}
    response = DynectSession.get_session().execute(uri, 'GET', api_args)
    update_users = [UpdateUser(api=False, **user) for user in response['data']]
    if search is not None:
        # Fix: require *all* criteria to match. The previous loop appended
        # on any single matching key, and appended a user once per matching
        # key, producing duplicates.
        update_users = [
            uu for uu in update_users
            if all(hasattr(uu, key) and getattr(uu, key) == val
                   for key, val in search.items())
        ]
    return update_users
def get_users(search=None):
"""Return a ``list`` of :class:`~dyn.tm.accounts.User` objects. If *search*
is specified, then only users who match those search parameters will be
returned in the list. Otherwise, all :class:`~dyn.tm.accounts.User`'s will
be returned.
:param search: A ``dict`` of search criteria. Key's in this ``dict`` much
map to an attribute a :class:`~dyn.tm.accounts.User` instance and the
value mapped to by that key will be used as the search criteria for
that key when searching.
:return: a ``list`` of :class:`~dyn.tm.accounts.User` objects
"""
uri = '/User/'
api_args = {'detail': 'Y'}
if search is not None:
search_string = ''
for key, val in search.items():
if search_string != '':
' AND '.join([search_string, '{}:"{}"'.format(key, val)])
else:
search_string = '{}:"{}"'.format(key, val)
api_args['search'] = search_string
response = DynectSession.get_session().execute(uri, 'GET', api_args)
users = []
for user in response['data']:
user_name = None
if 'user_name' in user:
user_name = user['user_name']
del user['user_name']
users.append(User(user_name, api=False, **user))
return users
def get_permissions_groups(search=None):
    """Return a ``list`` of :class:`~dyn.tm.accounts.PermissionGroup` objects.
    If *search* is specified, then only
    :class:`~dyn.tm.accounts.PermissionGroup`'s that match all of those search
    criteria will be returned in the list. Otherwise, all
    :class:`~dyn.tm.accounts.PermissionGroup`'s will be returned.

    :param search: A ``dict`` of search criteria. Key's in this ``dict`` much
        map to an attribute a :class:`~dyn.tm.accounts.PermissionGroup`
        instance and the value mapped to by that key will be used as the search
        criteria for that key when searching.
    :return: a ``list`` of :class:`~dyn.tm.accounts.PermissionGroup` objects"""
    uri = '/PermissionGroup/'
    api_args = {'detail': 'Y'}
    response = DynectSession.get_session().execute(uri, 'GET', api_args)
    groups = [PermissionsGroup(None, api=False, **group)
              for group in response['data']]
    if search is not None:
        # Fix: require *all* criteria to match; the previous loop matched on
        # any single key and appended duplicates when several keys matched.
        groups = [
            group for group in groups
            if all(hasattr(group, key) and getattr(group, key) == val
                   for key, val in search.items())
        ]
    return groups
def get_contacts(search=None):
    """Return a ``list`` of :class:`~dyn.tm.accounts.Contact` objects. If
    *search* is specified, then only :class:`~dyn.tm.accounts.Contact`'s who
    match all of those search criteria will be returned in the list.
    Otherwise, all :class:`~dyn.tm.accounts.Contact`'s will be returned.

    :param search: A ``dict`` of search criteria. Key's in this ``dict`` much
        map to an attribute a :class:`~dyn.tm.accounts.Contact` instance and
        the value mapped to by that key will be used as the search criteria
        for that key when searching.
    :return: a ``list`` of :class:`~dyn.tm.accounts.Contact` objects"""
    uri = '/Contact/'
    api_args = {'detail': 'Y'}
    response = DynectSession.get_session().execute(uri, 'GET', api_args)
    contacts = []
    for contact in response['data']:
        # The API returns 'nickname'; the Contact constructor expects it
        # under the private '_nickname' keyword.
        if 'nickname' in contact:
            contact['_nickname'] = contact.pop('nickname')
        contacts.append(Contact(None, api=False, **contact))
    if search is not None:
        # Fix: require *all* criteria to match; the previous loop matched on
        # any single key and appended duplicates when several keys matched.
        contacts = [
            contact for contact in contacts
            if all(hasattr(contact, key) and getattr(contact, key) == val
                   for key, val in search.items())
        ]
    return contacts
def get_notifiers(search=None):
    """Return a ``list`` of :class:`~dyn.tm.accounts.Notifier` objects. If
    *search* is specified, then only :class:`~dyn.tm.accounts.Notifier`'s who
    match all of those search criteria will be returned in the list.
    Otherwise, all :class:`~dyn.tm.accounts.Notifier`'s will be returned.

    :param search: A ``dict`` of search criteria. Key's in this ``dict`` much
        map to an attribute a :class:`~dyn.tm.accounts.Notifier` instance and
        the value mapped to by that key will be used as the search criteria for
        that key when searching.
    :return: a ``list`` of :class:`~dyn.tm.accounts.Notifier` objects"""
    uri = '/Notifier/'
    api_args = {'detail': 'Y'}
    response = DynectSession.get_session().execute(uri, 'GET', api_args)
    notifiers = [Notifier(None, api=False, **notifier)
                 for notifier in response['data']]
    if search is not None:
        # Fix: require *all* criteria to match; the previous loop matched on
        # any single key and appended duplicates when several keys matched.
        notifiers = [
            notifier for notifier in notifiers
            if all(hasattr(notifier, key) and getattr(notifier, key) == val
                   for key, val in search.items())
        ]
    return notifiers
class UpdateUser(object):
    """:class:`~dyn.tm.accounts.UpdateUser` type objects are a special form of
    a :class:`~dyn.tm.accounts.User` which are tied to a specific Dynamic DNS
    services.
    """
    def __init__(self, *args, **kwargs):
        """Create an :class:`~dyn.tm.accounts.UpdateUser` object
        :param user_name: the Username this
            :class:`~dyn.tm.accounts.UpdateUser` uses or will use to log in to
            the DynECT System. A :class:`~dyn.tm.accounts.UpdateUser`'s
            `user_name` is required for both creating and getting
            :class:`~dyn.tm.accounts.UpdateUser`'s.
        :param nickname: When creating a new
            :class:`~dyn.tm.accounts.UpdateUser` on the DynECT System, this
            `nickname` will be the System nickname for this
            :class:`~dyn.tm.accounts.UpdateUser`
        :param password: When creating a new
            :class:`~dyn.tm.accounts.UpdateUser` on the DynECT System, this
            `password` will be the password this
            :class:`~dyn.tm.accounts.UpdateUser` uses to log into the System
        """
        super(UpdateUser, self).__init__()
        self.uri = '/UpdateUser/'
        self._password = self._status = self._user_name = self._nickname = None
        if 'api' in kwargs:
            # Hydrating from raw API data: only copy whitelisted fields.
            good_args = ('user_name', 'status', 'password')
            for key, val in kwargs.items():
                if key in good_args:
                    setattr(self, '_' + key, val)
            self.uri = '/UpdateUser/{}/'.format(self._user_name)
        elif len(args) + len(kwargs) == 1:
            # A single argument means "fetch an existing user by name".
            self._get(*args, **kwargs)
        else:
            # Otherwise create a brand-new UpdateUser on the system.
            self._post(*args, **kwargs)
    def _post(self, nickname, password):
        """Create a new :class:`~dyn.tm.accounts.UpdateUser` on the DynECT
        System
        """
        self._nickname = nickname
        self._password = password
        uri = '/UpdateUser/'
        api_args = {'nickname': self._nickname,
                    'password': self._password}
        response = DynectSession.get_session().execute(uri, 'POST', api_args)
        self._build(response['data'])
        # The system assigns the user_name; the URI can only be set now.
        self.uri = '/UpdateUser/{}/'.format(self._user_name)
    def _get(self, user_name):
        """Get an existing :class:`~dyn.tm.accounts.UpdateUser` from the
        DynECT System
        """
        self._user_name = user_name
        self.uri = '/UpdateUser/{}/'.format(self._user_name)
        response = DynectSession.get_session().execute(self.uri, 'GET')
        self._build(response['data'])
    def _build(self, data):
        """Copy every API response field onto a same-named private attribute."""
        for key, val in data.items():
            setattr(self, '_' + key, val)
    def _update(self, api_args=None):
        """PUT *api_args* to this user's URI and refresh local state."""
        response = DynectSession.get_session().execute(self.uri, 'PUT',
                                                       api_args)
        self._build(response['data'])
    @property
    def user_name(self):
        """This :class:`~dyn.tm.accounts.UpdateUser`'s `user_name`. An
        :class:`~dyn.tm.accounts.UpdateUser`'s user_name is a read-only
        property which can not be updated after the :class:`UpdateUser` has
        been created.
        """
        return self._user_name
    @user_name.setter
    def user_name(self, value):
        # Read-only: assignment is deliberately a silent no-op.
        pass
    @property
    def nickname(self):
        """This :class:`~dyn.tm.accounts.UpdateUser`s `nickname`. An
        :class:`~dyn.tm.accounts.UpdateUser`'s `nickname` is a read-only
        property which can not be updated after the
        :class:`~dyn.tm.accounts.UpdateUser` has been created.
        """
        return self._nickname
    @nickname.setter
    def nickname(self, value):
        # Read-only: assignment is deliberately a silent no-op.
        pass
    @property
    def status(self):
        """The current `status` of an :class:`~dyn.tm.accounts.UpdateUser` will
        be one of either 'active' or 'blocked'. Blocked
        :class:`~dyn.tm.accounts.UpdateUser`'s are unable to log into the
        DynECT System, where active :class:`~dyn.tm.accounts.UpdateUser`'s are.
        """
        return self._status
    @status.setter
    def status(self, value):
        # Read-only: use block()/unblock() to change status.
        pass
    @property
    def password(self):
        """The current `password` for this
        :class:`~dyn.tm.accounts.UpdateUser`. An
        :class:`~dyn.tm.accounts.UpdateUser`'s `password` may be reassigned.
        """
        # Lazily re-fetch from the API if the password was never populated.
        if self._password is None or self._password == u'':
            self._get(self._user_name)
        return self._password
    @password.setter
    def password(self, new_password):
        """Update this :class:`~dyn.tm.accounts.UpdateUser`'s password to be
        the provided password
        :param new_password: The new password to use
        """
        api_args = {'password': new_password}
        self._update(api_args)
    def block(self):
        """Set the status of this :class:`~dyn.tm.accounts.UpdateUser` to
        'blocked'. This will prevent this :class:`~dyn.tm.accounts.UpdateUser`
        from logging in until they are explicitly unblocked.
        """
        api_args = {'block': True}
        self._update(api_args)
    def unblock(self):
        """Set the status of this :class:`~dyn.tm.accounts.UpdateUser` to
        'active'. This will re-enable this :class:`~dyn.tm.accounts.UpdateUser`
        to be able to login if they were previously blocked.
        """
        api_args = {'unblock': True}
        self._update(api_args)
    def sync_password(self):
        """Pull in this :class:`~dyn.tm.accounts.UpdateUser` current password
        from the DynECT System, in the unlikely event that this
        :class:`~dyn.tm.accounts.UpdateUser` object's password may have gotten
        out of sync
        """
        api_args = {'user_name': self._user_name}
        self._update(api_args)
    def delete(self):
        """Delete this :class:`~dyn.tm.accounts.UpdateUser` from the DynECT
        System. It is important to note that this operation may not be undone.
        """
        DynectSession.get_session().execute(self.uri, 'DELETE')
    def __str__(self):
        """Custom str method"""
        return force_unicode('<UpdateUser>: {}').format(self.user_name)
    __repr__ = __unicode__ = __str__
    def __bytes__(self):
        """bytes override"""
        # NOTE(review): bytes(str) raises TypeError on Python 3 without an
        # encoding argument -- looks Python-2 only; confirm intended.
        return bytes(self.__str__())
class User(object):
    """DynECT System User object"""

    def __init__(self, user_name, *args, **kwargs):
        """Create a new :class:`~dyn.tm.accounts.User` object

        :param user_name: This :class:`~dyn.tm.accounts.User`'s system
            username; used for logging into the system
        :param password: Password for this :class:`~dyn.tm.accounts.User`
            account
        :param email: This :class:`~dyn.tm.accounts.User`'s Email address
        :param first_name: This :class:`~dyn.tm.accounts.User`'s first name
        :param last_name: This :class:`~dyn.tm.accounts.User`'s last name
        :param nickname: The nickname for the `Contact` associated with this
            :class:`~dyn.tm.accounts.User`
        :param organization: This :class:`~dyn.tm.accounts.User`'s organization
        :param phone: This :class:`~dyn.tm.accounts.User`'s phone number. Can
            be of the form: (0) ( country-code ) ( local number ) ( extension )
            Only the country-code (1-3 digits) and local number (at least 7
            digits) are required. The extension can be up to 4 digits. Any
            non-digits are ignored.
        :param address: This :class:`~dyn.tm.accounts.User`'s street address
        :param address_2: This :class:`~dyn.tm.accounts.User`'s street address,
            line 2
        :param city: This :class:`~dyn.tm.accounts.User`'s city, part of the
            user's address
        :param country: This :class:`~dyn.tm.accounts.User`'s country, part of
            the user's address
        :param fax: This :class:`~dyn.tm.accounts.User`'s fax number
        :param notify_email: Email address where this
            :class:`~dyn.tm.accounts.User` should receive notifications
        :param pager_email: Email address where this
            :class:`~dyn.tm.accounts.User` should receive messages destined
            for a pager
        :param post_code: Zip code or Postal code
        :param group_name: A list of permission groups this
            :class:`~dyn.tm.accounts.User` belongs to
        :param permission: A list of permissions assigned to this
            :class:`~dyn.tm.accounts.User`
        :param zone: A list of zones where this
            :class:`~dyn.tm.accounts.User`'s permissions apply
        :param forbid: A list of forbidden permissions for this
            :class:`~dyn.tm.accounts.User`
        :param status: Current status of this :class:`~dyn.tm.accounts.User`
        :param website: This :class:`~dyn.tm.accounts.User`'s website
        """
        super(User, self).__init__()
        self._user_name = user_name
        self.uri = '/User/{}/'.format(self._user_name)
        self._password = self._email = self._first_name = self._last_name = None
        self._nickname = self._organization = self._phone = self._address = None
        self._address_2 = self._city = self._country = self._fax = None
        self._notify_email = self._pager_email = self._post_code = None
        self._group_name = self._permission = self._zone = self._forbid = None
        self._status = self._website = None
        # Local bookkeeping of this user's permissions / permission groups
        self.permissions = []
        self.permission_groups = []
        self.groups = []
        if 'api' in kwargs:
            # Built from an API response dict: copy fields, no HTTP call
            del kwargs['api']
            for key, val in kwargs.items():
                if key != '_user_name':
                    setattr(self, '_' + key, val)
                else:
                    setattr(self, key, val)
        elif len(args) == 0 and len(kwargs) == 0:
            # Only a user_name was supplied: fetch the existing user
            self._get()
        else:
            # Extra arguments mean we are creating a brand new user
            self._post(*args, **kwargs)

    def _post(self, password, email, first_name, last_name, nickname,
              organization, phone, address=None, address_2=None, city=None,
              country=None, fax=None, notify_email=None, pager_email=None,
              post_code=None, group_name=None, permission=None, zone=None,
              forbid=None, status=None, website=None):
        """Create a new :class:`~dyn.tm.accounts.User` object on the DynECT
        System
        """
        self._password = password
        self._email = email
        self._first_name = first_name
        self._last_name = last_name
        self._nickname = nickname
        self._organization = organization
        self._phone = phone
        self._address = address
        self._address_2 = address_2
        self._city = city
        self._country = country
        self._fax = fax
        self._notify_email = notify_email
        self._pager_email = pager_email
        self._post_code = post_code
        self._group_name = group_name
        self._permission = permission
        self._zone = zone
        self._forbid = forbid
        self._status = status
        self._website = website
        # The session serializes this object's fields as the request payload
        response = DynectSession.get_session().execute(self.uri, 'POST', self)
        for key, val in response['data'].items():
            setattr(self, '_' + key, val)

    def _get(self):
        """Get an existing :class:`~dyn.tm.accounts.User` object from the
        DynECT System
        """
        api_args = {}
        response = DynectSession.get_session().execute(self.uri, 'GET',
                                                       api_args)
        for key, val in response['data'].items():
            setattr(self, '_' + key, val)

    def _update(self, api_args=None):
        """PUT *api_args* to this user's uri and refresh local fields from
        the API response
        """
        response = DynectSession.get_session().execute(self.uri, 'PUT',
                                                       api_args)
        for key, val in response['data'].items():
            setattr(self, '_' + key, val)

    @property
    def user_name(self):
        """A :class:`~dyn.tm.accounts.User`'s user_name is a read-only property
        """
        return self._user_name

    @user_name.setter
    def user_name(self, value):
        pass

    @property
    def status(self):
        """A :class:`~dyn.tm.accounts.User`'s status is a read-only property.
        To change you must use the :meth:`block`/:meth:`unblock` methods
        """
        return self._status

    @status.setter
    def status(self, value):
        pass

    @property
    def email(self):
        """This :class:`~dyn.tm.accounts.User`'s Email address"""
        return self._email

    @email.setter
    def email(self, value):
        api_args = {'email': value}
        self._update(api_args)

    @property
    def first_name(self):
        """This :class:`~dyn.tm.accounts.User`'s first name"""
        return self._first_name

    @first_name.setter
    def first_name(self, value):
        api_args = {'first_name': value}
        self._update(api_args)

    @property
    def last_name(self):
        """This :class:`~dyn.tm.accounts.User`'s last name"""
        return self._last_name

    @last_name.setter
    def last_name(self, value):
        api_args = {'last_name': value}
        self._update(api_args)

    @property
    def nickname(self):
        """The nickname for the `Contact` associated with this
        :class:`~dyn.tm.accounts.User`"""
        return self._nickname

    @nickname.setter
    def nickname(self, value):
        api_args = {'nickname': value}
        self._update(api_args)

    @property
    def organization(self):
        """This :class:`~dyn.tm.accounts.User`'s organization"""
        return self._organization

    @organization.setter
    def organization(self, value):
        api_args = {'organization': value}
        self._update(api_args)

    @property
    def phone(self):
        """This :class:`~dyn.tm.accounts.User`'s phone number. Can be of the
        form: (0) ( country-code ) ( local number ) ( extension ) Only the
        country-code (1-3 digits) and local number (at least 7 digits) are
        required. The extension can be up to 4 digits. Any non-digits are
        ignored.
        """
        return self._phone

    @phone.setter
    def phone(self, value):
        api_args = {'phone': value}
        self._update(api_args)

    @property
    def address(self):
        """This :class:`~dyn.tm.accounts.User`'s street address"""
        return self._address

    @address.setter
    def address(self, value):
        api_args = {'address': value}
        self._update(api_args)

    @property
    def address_2(self):
        """This :class:`~dyn.tm.accounts.User`'s street address, line 2"""
        return self._address_2

    @address_2.setter
    def address_2(self, value):
        api_args = {'address_2': value}
        self._update(api_args)

    @property
    def city(self):
        """This :class:`~dyn.tm.accounts.User`'s city, part of the user's
        address
        """
        return self._city

    @city.setter
    def city(self, value):
        api_args = {'city': value}
        self._update(api_args)

    @property
    def country(self):
        """This :class:`~dyn.tm.accounts.User`'s country, part of the user's
        address
        """
        return self._country

    @country.setter
    def country(self, value):
        api_args = {'country': value}
        self._update(api_args)

    @property
    def fax(self):
        """This :class:`~dyn.tm.accounts.User`'s fax number"""
        return self._fax

    @fax.setter
    def fax(self, value):
        api_args = {'fax': value}
        self._update(api_args)

    @property
    def notify_email(self):
        """Email address where this :class:`~dyn.tm.accounts.User` should
        receive notifications
        """
        return self._notify_email

    @notify_email.setter
    def notify_email(self, value):
        api_args = {'notify_email': value}
        self._update(api_args)

    @property
    def pager_email(self):
        """Email address where this :class:`~dyn.tm.accounts.User` should
        receive messages destined for a pager
        """
        return self._pager_email

    @pager_email.setter
    def pager_email(self, value):
        api_args = {'pager_email': value}
        self._update(api_args)

    @property
    def post_code(self):
        """This :class:`~dyn.tm.accounts.User`'s postal code, part of the
        user's address
        """
        return self._post_code

    @post_code.setter
    def post_code(self, value):
        api_args = {'post_code': value}
        self._update(api_args)

    @property
    def group_name(self):
        """A list of permission groups this :class:`~dyn.tm.accounts.User`
        belongs to
        """
        return self._group_name

    @group_name.setter
    def group_name(self, value):
        api_args = {'group_name': value}
        self._update(api_args)

    @property
    def permission(self):
        """A list of permissions assigned to this
        :class:`~dyn.tm.accounts.User`
        """
        return self._permission

    @permission.setter
    def permission(self, value):
        api_args = {'permission': value}
        self._update(api_args)

    @property
    def zone(self):
        """A list of zones where this :class:`~dyn.tm.accounts.User`'s
        permissions apply
        """
        return self._zone

    @zone.setter
    def zone(self, value):
        api_args = {'zone': value}
        self._update(api_args)

    @property
    def forbid(self):
        """A list of forbidden permissions for this
        :class:`~dyn.tm.accounts.User`
        """
        return self._forbid

    @forbid.setter
    def forbid(self, value):
        """Apply a new list of forbidden permissions for the
        :class:`~dyn.tm.accounts.User`
        """
        api_args = {'forbid': value}
        self._update(api_args)

    @property
    def website(self):
        """This :class:`~dyn.tm.accounts.User`'s website"""
        return self._website

    @website.setter
    def website(self, value):
        api_args = {'website': value}
        self._update(api_args)

    def block(self):
        """Blocks this :class:`~dyn.tm.accounts.User` from logging in"""
        api_args = {'block': 'True'}
        uri = '/User/{}/'.format(self._user_name)
        response = DynectSession.get_session().execute(uri, 'PUT', api_args)
        self._status = response['data']['status']

    def unblock(self):
        """Restores this :class:`~dyn.tm.accounts.User` to an active status and
        re-enables their log-in
        """
        api_args = {'unblock': 'True'}
        uri = '/User/{}/'.format(self._user_name)
        response = DynectSession.get_session().execute(uri, 'PUT', api_args)
        self._status = response['data']['status']

    def add_permission(self, permission):
        """Add individual permissions to this :class:`~dyn.tm.accounts.User`

        :param permission: the permission to add
        """
        self.permissions.append(permission)
        uri = '/UserPermissionEntry/{}/{}/'.format(self._user_name, permission)
        DynectSession.get_session().execute(uri, 'POST')

    def replace_permissions(self, permissions=None):
        """Replaces the list of permissions for this
        :class:`~dyn.tm.accounts.User`

        :param permissions: A list of permissions. Pass an empty list or omit
            the argument to clear the list of permissions of the
            :class:`~dyn.tm.accounts.User`
        """
        api_args = {}
        if permissions is not None:
            api_args['permissions'] = permissions
            self.permissions = permissions
        else:
            self.permissions = []
        uri = '/UserPermissionEntry/{}/'.format(self._user_name)
        DynectSession.get_session().execute(uri, 'PUT', api_args)

    def delete_permission(self, permission):
        """Remove this specific permission from the
        :class:`~dyn.tm.accounts.User`

        :param permission: the permission to remove
        """
        if permission in self.permissions:
            self.permissions.remove(permission)
        uri = '/UserPermissionEntry/{}/{}/'.format(self._user_name, permission)
        DynectSession.get_session().execute(uri, 'DELETE')

    def add_permissions_group(self, group):
        """Assigns the permissions group to this :class:`~dyn.tm.accounts.User`

        :param group: the permissions group to add to this
            :class:`~dyn.tm.accounts.User`
        """
        self.permission_groups.append(group)
        uri = '/UserGroupEntry/{}/{}/'.format(self._user_name, group)
        DynectSession.get_session().execute(uri, 'POST')

    def replace_permissions_group(self, groups=None):
        """Replaces the list of permissions for this
        :class:`~dyn.tm.accounts.User`

        :param groups: A list of permissions groups. Pass an empty list or omit
            the argument to clear the list of permissions groups of the
            :class:`~dyn.tm.accounts.User`
        """
        api_args = {}
        if groups is not None:
            api_args['groups'] = groups
            self.groups = groups
        else:
            self.groups = []
        uri = '/UserGroupEntry/{}/'.format(self._user_name)
        DynectSession.get_session().execute(uri, 'PUT', api_args)

    def delete_permissions_group(self, group):
        """Removes the permissions group from the
        :class:`~dyn.tm.accounts.User`

        :param group: the permissions group to remove from this
            :class:`~dyn.tm.accounts.User`
        """
        # BUG FIX: membership was previously tested against self.permissions
        # (the individual-permission list), so the local permission_groups
        # bookkeeping was almost never updated even though the DELETE below
        # always ran.
        if group in self.permission_groups:
            self.permission_groups.remove(group)
        uri = '/UserGroupEntry/{}/{}/'.format(self._user_name, group)
        DynectSession.get_session().execute(uri, 'DELETE')

    def add_forbid_rule(self, permission, zone=None):
        """Adds the forbid rule to the :class:`~dyn.tm.accounts.User`'s
        permission group

        :param permission: the permission to forbid from this
            :class:`~dyn.tm.accounts.User`
        :param zone: A list of zones where the forbid rule applies
        """
        api_args = {}
        if zone is not None:
            api_args['zone'] = zone
        uri = '/UserForbidEntry/{}/{}/'.format(self._user_name, permission)
        DynectSession.get_session().execute(uri, 'POST', api_args)

    def replace_forbid_rules(self, forbid=None):
        """Replaces the list of forbidden permissions in the
        :class:`~dyn.tm.accounts.User`'s permissions group with a new list.

        :param forbid: A list of rules to replace the forbidden rules on the
            :class:`~dyn.tm.accounts.User`'s permission group. If empty or not
            passed in, the :class:`~dyn.tm.accounts.User`'s forbid list will be
            cleared
        """
        api_args = {}
        if forbid is not None:
            api_args['forbid'] = forbid
        uri = '/UserForbidEntry/{}/'.format(self._user_name)
        DynectSession.get_session().execute(uri, 'PUT', api_args)

    def delete_forbid_rule(self, permission, zone=None):
        """Removes a forbid permissions rule from the
        :class:`~dyn.tm.accounts.User`'s permission group

        :param permission: permission
        :param zone: A list of zones where the forbid rule applies
        """
        api_args = {}
        if zone is not None:
            api_args['zone'] = zone
        uri = '/UserForbidEntry/{}/{}/'.format(self._user_name, permission)
        DynectSession.get_session().execute(uri, 'DELETE', api_args)

    def delete(self):
        """Delete this :class:`~dyn.tm.accounts.User` from the system"""
        uri = '/User/{}/'.format(self._user_name)
        DynectSession.get_session().execute(uri, 'DELETE')

    def __str__(self):
        """Custom str method"""
        return force_unicode('<User>: {}').format(self.user_name)
    __repr__ = __unicode__ = __str__

    def __bytes__(self):
        """bytes override"""
        return bytes(self.__str__())
class PermissionsGroup(object):
    """A DynECT System Permissions Group object"""

    def __init__(self, group_name, *args, **kwargs):
        """Create a new permissions Group

        :param group_name: The name of the permission group to update
        :param description: A description of the permission group
        :param group_type: The type of the permission group. Valid values:
            plain or default
        :param all_users: If 'Y', all current users will be added to the group.
            Cannot be used if user_name is passed in
        :param permission: A list of permissions that the group contains
        :param user_name: A list of users that belong to the permission group
        :param subgroup: A list of groups that belong to the permission group
        :param zone: A list of zones where the group's permissions apply
        """
        super(PermissionsGroup, self).__init__()
        self._group_name = group_name
        self._description = self._group_type = self._all_users = None
        self._permission = self._user_name = self._subgroup = self._zone = None
        self.uri = '/PermissionGroup/{}/'.format(self._group_name)
        if 'api' in kwargs:
            # Built from an API response dict: copy fields, no HTTP call
            del kwargs['api']
            for key, val in kwargs.items():
                setattr(self, '_' + key, val)
        elif len(args) == 0 and len(kwargs) == 0:
            self._get()
        else:
            self._post(*args, **kwargs)

    def _build(self, data):
        """Populate this object's fields from an API ``data`` dict.

        Maps the wire field ``type`` onto ``_group_type`` and flattens the
        ``zone`` list of dicts down to plain zone names.
        """
        for key, val in data.items():
            if key == 'type':
                self._group_type = val
            elif key == 'zone':
                self._zone = [zone['zone_name'] for zone in val]
            else:
                setattr(self, '_' + key, val)

    def _post(self, description, group_type=None, all_users=None,
              permission=None, user_name=None, subgroup=None, zone=None):
        """Create a new :class:`~dyn.tm.accounts.PermissionsGroup` on the
        DynECT System
        """
        self._description = description
        self._group_type = group_type
        self._all_users = all_users
        self._permission = permission
        self._user_name = user_name
        self._subgroup = subgroup
        self._zone = zone
        api_args = {}
        # Any fields that were not explicitly set should not be passed through
        for key, val in self.__dict__.items():
            if val is not None and not hasattr(val, '__call__') and \
                    key.startswith('_'):
                # BUG FIX: this comparison previously used ``is``, which tests
                # object identity and only worked by accident of CPython
                # string interning; ``==`` is the correct string comparison.
                if key == '_group_type':
                    api_args['type'] = val
                else:
                    api_args[key[1:]] = val
        uri = '/PermissionGroup/{}/'.format(self._group_name)
        response = DynectSession.get_session().execute(uri, 'POST', api_args)
        self._build(response['data'])

    def _get(self):
        """Get an existing :class:`~dyn.tm.accounts.PermissionsGroup` from the
        DynECT System
        """
        response = DynectSession.get_session().execute(self.uri, 'GET')
        self._build(response['data'])

    def _update(self, api_args=None):
        """PUT *api_args* to this group's uri and refresh local fields from
        the API response
        """
        response = DynectSession.get_session().execute(self.uri, 'PUT',
                                                       api_args)
        self._build(response['data'])

    @property
    def group_name(self):
        """The name of this permission group"""
        return self._group_name

    @group_name.setter
    def group_name(self, value):
        new_group_name = value
        api_args = {'new_group_name': new_group_name,
                    'group_name': self._group_name}
        self._update(api_args)
        # Rename succeeded; keep the local name and uri pointing at the new
        # resource location
        self._group_name = new_group_name
        self.uri = '/PermissionGroup/{}/'.format(self._group_name)

    @property
    def description(self):
        """A description of this permission group"""
        return self._description

    @description.setter
    def description(self, value):
        self._description = value
        api_args = {'group_name': self._group_name,
                    'description': self._description}
        self._update(api_args)

    @property
    def group_type(self):
        """The type of this permission group"""
        return self._group_type

    @group_type.setter
    def group_type(self, value):
        self._group_type = value
        api_args = {'type': self._group_type,
                    'group_name': self._group_name}
        self._update(api_args)

    @property
    def all_users(self):
        """If 'Y', all current users will be added to the group. Cannot be
        used if user_name is passed in
        """
        return self._all_users

    @all_users.setter
    def all_users(self, value):
        self._all_users = value
        api_args = {'all_users': self._all_users,
                    'group_name': self._group_name}
        self._update(api_args)

    @property
    def permission(self):
        """A list of permissions that this group contains"""
        return self._permission

    @permission.setter
    def permission(self, value):
        self._permission = value
        api_args = {'permission': self._permission,
                    'group_name': self._group_name}
        self._update(api_args)

    @property
    def user_name(self):
        """A list of users that belong to the permission group"""
        return self._user_name

    @user_name.setter
    def user_name(self, value):
        self._user_name = value
        api_args = {'user_name': self._user_name,
                    'group_name': self._group_name}
        self._update(api_args)

    @property
    def subgroup(self):
        """A list of groups that belong to the permission group"""
        return self._subgroup

    @subgroup.setter
    def subgroup(self, value):
        self._subgroup = value
        api_args = {'subgroup': self._subgroup,
                    'group_name': self._group_name}
        self._update(api_args)

    @property
    def zone(self):
        """A list of zones where the group's permissions apply"""
        return self._zone

    @zone.setter
    def zone(self, value):
        self._zone = value
        api_args = {'zone': self._zone,
                    'group_name': self._group_name}
        self._update(api_args)

    def delete(self):
        """Delete this permission group"""
        uri = '/PermissionGroup/{}/'.format(self._group_name)
        DynectSession.get_session().execute(uri, 'DELETE')

    def add_permission(self, permission):
        """Adds individual permissions to the user

        :param permission: the permission to add to this user
        """
        uri = '/PermissionGroupPermissionEntry/{}/{}/'.format(self._group_name,
                                                              permission)
        DynectSession.get_session().execute(uri, 'POST')
        # Guard: _permission may still be None when the group was fetched
        # without any permissions assigned
        if self._permission is None:
            self._permission = []
        self._permission.append(permission)

    def replace_permissions(self, permission=None):
        """Replaces a list of individual user permissions for the user

        :param permission: A list of permissions. Pass an empty list or omit
            the argument to clear the list of permissions of the user
        """
        api_args = {}
        if permission is not None:
            api_args['permission'] = permission
        uri = '/PermissionGroupPermissionEntry/{}/'.format(self._group_name)
        DynectSession.get_session().execute(uri, 'PUT', api_args)
        if permission:
            self._permission = permission
        else:
            self._permission = []

    def remove_permission(self, permission):
        """Removes the specific permission from the user

        :param permission: the permission to remove
        """
        uri = '/PermissionGroupPermissionEntry/{}/{}/'.format(self._group_name,
                                                              permission)
        DynectSession.get_session().execute(uri, 'DELETE')
        self._permission.remove(permission)

    def add_zone(self, zone, recurse='Y'):
        """Add a new Zone to this :class:`~dyn.tm.accounts.PermissionsGroup`

        :param zone: The name of the Zone to be added to this
            :class:`~dyn.tm.accounts.PermissionsGroup`
        :param recurse: A flag determining whether or not to add all sub-nodes
            of a Zone to this :class:`~dyn.tm.accounts.PermissionsGroup`
        """
        api_args = {'recurse': recurse}
        uri = '/PermissionGroupZoneEntry/{}/{}/'.format(self._group_name, zone)
        DynectSession.get_session().execute(uri, 'POST', api_args)
        # Guard: _zone may still be None for a group with no zones
        if self._zone is None:
            self._zone = []
        self._zone.append(zone)

    def add_subgroup(self, name):
        """Add a new Sub group to this
        :class:`~dyn.tm.accounts.PermissionsGroup`

        :param name: The name of the :class:`~dyn.tm.accounts.PermissionsGroup`
            to be added to this :class:`~dyn.tm.accounts.PermissionsGroup`'s
            subgroups
        """
        uri = '/PermissionGroupSubgroupEntry/{}/{}/'.format(self._group_name,
                                                            name)
        DynectSession.get_session().execute(uri, 'POST')
        # Guard: _subgroup may still be None for a group with no subgroups
        if self._subgroup is None:
            self._subgroup = []
        self._subgroup.append(name)

    def update_subgroup(self, subgroups):
        """Update the subgroups under this
        :class:`~dyn.tm.accounts.PermissionsGroup`

        :param subgroups: The subgroups with updated information
        """
        api_args = {'subgroup': subgroups}
        uri = '/PermissionGroupSubgroupEntry/{}/'.format(self._group_name)
        DynectSession.get_session().execute(uri, 'PUT', api_args)
        self._subgroup = subgroups

    def delete_subgroup(self, name):
        """Remove a Subgroup from this
        :class:`~dyn.tm.accounts.PermissionsGroup`

        :param name: The name of the :class:`~dyn.tm.accounts.PermissionsGroup`
            to be removed from this
            :class:`~dyn.tm.accounts.PermissionsGroup`'s subgroups
        """
        uri = '/PermissionGroupSubgroupEntry/{}/{}/'.format(self._group_name,
                                                            name)
        DynectSession.get_session().execute(uri, 'DELETE')
        self._subgroup.remove(name)

    def __str__(self):
        """Custom str method"""
        return force_unicode('<PermissionsGroup>: {}').format(self.group_name)
    __repr__ = __unicode__ = __str__

    def __bytes__(self):
        """bytes override"""
        return bytes(self.__str__())
class UserZone(object):
    """A DynECT system UserZoneEntry"""

    def __init__(self, user_name, zone_name, recurse='Y'):
        super(UserZone, self).__init__()
        self._user_name = user_name
        self._zone_name = zone_name
        self._recurse = recurse
        # Creating the object registers the entry on the DynECT System
        uri = '/UserZoneEntry/{}/{}/'.format(self._user_name, self._zone_name)
        response = DynectSession.get_session().execute(
            uri, 'POST', {'recurse': self._recurse})
        for field, value in response['data'].items():
            setattr(self, '_' + field, value)

    @property
    def user_name(self):
        """User_name property of :class:`~dyn.tm.accounts.UserZone` object is
        read only
        """
        return self._user_name

    @user_name.setter
    def user_name(self, value):
        # Read-only: silently ignore assignment
        pass

    @property
    def recurse(self):
        """Indicates whether or not permissions should apply to subnodes of
        the `zone_name` as well
        """
        return self._recurse

    @recurse.setter
    def recurse(self, value):
        self._recurse = value
        DynectSession.get_session().execute(
            '/UserZoneEntry/{}/'.format(self._user_name), 'PUT',
            {'recurse': self._recurse, 'zone_name': self._zone_name})

    def update_zones(self, zone=None):
        """Replacement list zones where the user will now have permissions.
        Pass an empty list or omit the argument to clear the user's zone
        permissions

        :param zone: a list of zone names where the user will now have
            permissions
        """
        zone_names = zone or []
        api_args = {'zone': [{'zone_name': name} for name in zone_names]}
        uri = '/UserZoneEntry/{}/'.format(self._user_name)
        response = DynectSession.get_session().execute(uri, 'PUT', api_args)
        for field, value in response['data'].items():
            setattr(self, '_' + field, value)

    def delete(self):
        """Delete this :class:`~dyn.tm.accounts.UserZone` object from the
        DynECT System
        """
        uri = '/UserZoneEntry/{}/{}/'.format(self._user_name, self._zone_name)
        DynectSession.get_session().execute(uri, 'DELETE',
                                            {'recurse': self.recurse})

    def __str__(self):
        """Custom str method"""
        return force_unicode('<UserZone>: {}').format(self.user_name)
    __repr__ = __unicode__ = __str__

    def __bytes__(self):
        """bytes override"""
        return bytes(self.__str__())
class Notifier(object):
    """DynECT System Notifier"""

    def __init__(self, *args, **kwargs):
        """Create a new :class:`~dyn.tm.accounts.Notifier` object

        :param label: The label used to identify this
            :class:`~dyn.tm.accounts.Notifier`
        :param recipients: List of Recipients attached to this
            :class:`~dyn.tm.accounts.Notifier`
        :param services: List of services attached to this
            :class:`~dyn.tm.accounts.Notifier`
        :param notifier_id: The system id of this
            :class:`~dyn.tm.accounts.Notifier`
        """
        super(Notifier, self).__init__()
        self._label = self._recipients = self._services = None
        self._notifier_id = self.uri = None
        if 'api' in kwargs:
            # Built from an API response dict: copy fields, no HTTP call
            del kwargs['api']
            for key, val in kwargs.items():
                setattr(self, '_' + key, val)
            self.uri = '/Notifier/{}/'.format(self._notifier_id)
        elif len(args) + len(kwargs) > 1:
            self._post(*args, **kwargs)
        elif kwargs:
            # BUG FIX (dead code): this branch previously read
            # ``len(kwargs) > 0 or 'label' in kwargs`` -- the second clause
            # can never matter because it implies the first. A single keyword
            # argument (e.g. label=...) creates a new Notifier.
            self._post(**kwargs)
        else:
            # A single positional argument is treated as an existing
            # notifier_id to fetch
            self._get(*args, **kwargs)

    def _post(self, label=None, recipients=None, services=None):
        """Create a new :class:`~dyn.tm.accounts.Notifier` object on the
        DynECT System
        """
        if label is None:
            raise DynectInvalidArgumentError
        uri = '/Notifier/'
        self._label = label
        self._recipients = recipients
        self._services = services
        # The session serializes this object's fields as the request payload
        response = DynectSession.get_session().execute(uri, 'POST', self)
        self._build(response['data'])
        self.uri = '/Notifier/{}/'.format(self._notifier_id)

    def _get(self, notifier_id):
        """Get an existing :class:`~dyn.tm.accounts.Notifier` object from the
        DynECT System
        """
        self._notifier_id = notifier_id
        self.uri = '/Notifier/{}/'.format(self._notifier_id)
        response = DynectSession.get_session().execute(self.uri, 'GET')
        self._build(response['data'])

    def _build(self, data):
        """Populate this object's fields from an API ``data`` dict"""
        for key, val in data.items():
            setattr(self, '_' + key, val)

    def _update(self, api_args=None):
        """PUT *api_args* to this notifier's uri and refresh local fields
        from the API response
        """
        response = DynectSession.get_session().execute(self.uri, 'PUT',
                                                       api_args)
        self._build(response['data'])

    @property
    def notifier_id(self):
        """The unique System id for this Notifier"""
        return self._notifier_id

    @notifier_id.setter
    def notifier_id(self, value):
        pass

    @property
    def label(self):
        """The label used to identify this :class:`~dyn.tm.accounts.Notifier`
        """
        return self._label

    @label.setter
    def label(self, value):
        self._label = value
        api_args = {'label': self._label}
        self._update(api_args)

    @property
    def recipients(self):
        """List of Recipients attached to this
        :class:`~dyn.tm.accounts.Notifier`
        """
        return self._recipients

    @recipients.setter
    def recipients(self, value):
        self._recipients = value
        api_args = {'recipients': self._recipients}
        self._update(api_args)

    @property
    def services(self):
        """List of services attached to this
        :class:`~dyn.tm.accounts.Notifier`
        """
        return self._services

    @services.setter
    def services(self, value):
        self._services = value
        api_args = {'services': self._services}
        self._update(api_args)

    def delete(self):
        """Delete this :class:`~dyn.tm.accounts.Notifier` from the Dynect
        System
        """
        DynectSession.get_session().execute(self.uri, 'DELETE')

    def __str__(self):
        """Custom str method"""
        return force_unicode('<Notifier>: {}').format(self.label)
    __repr__ = __unicode__ = __str__

    def __bytes__(self):
        """bytes override"""
        return bytes(self.__str__())
class Contact(object):
"""A DynECT System Contact"""
    def __init__(self, nickname, *args, **kwargs):
        """Create a :class:`~dyn.tm.accounts.Contact` object
        :param nickname: The nickname for this
            :class:`~dyn.tm.accounts.Contact`
        :param email: The :class:`~dyn.tm.accounts.Contact`'s email address
        :param first_name: The :class:`~dyn.tm.accounts.Contact`'s first name
        :param last_name: The :class:`~dyn.tm.accounts.Contact`'s last name
        :param organization: The :class:`~dyn.tm.accounts.Contact`'s
            organization
        :param phone: The :class:`~dyn.tm.accounts.Contact`'s phone number. Can
            be of the form: ( 0 ) ( country-code ) ( local number )
            ( extension ) Only the country-code (1-3 digits) and local number
            (at least 7 digits) are required. The extension can be up to 4
            digits. Any non-digits are ignored.
        :param address: The :class:`~dyn.tm.accounts.Contact`'s street address
        :param address_2: The :class:`~dyn.tm.accounts.Contact`'s street
            address, line 2
        :param city: The :class:`~dyn.tm.accounts.Contact`'s city, part of the
            user's address
        :param country: The :class:`~dyn.tm.accounts.Contact`'s country, part
            of the :class:`~dyn.tm.accounts.Contact`'s address
        :param fax: The :class:`~dyn.tm.accounts.Contact`'s fax number
        :param notify_email: Email address where the
            :class:`~dyn.tm.accounts.Contact` should receive notifications
        :param pager_email: Email address where the
            :class:`~dyn.tm.accounts.Contact` should receive messages destined
            for a pager
        :param post_code: Zip code or Postal code
        :param state: The :class:`~dyn.tm.accounts.Contact`'s state, part of
            the :class:`~dyn.tm.accounts.Contact`'s address
        :param website: The :class:`~dyn.tm.accounts.Contact`'s website
        """
        super(Contact, self).__init__()
        self._nickname = nickname
        self._email = self._first_name = self._last_name = None
        self._organization = self._address = self._address_2 = self._city = None
        self._country = self._fax = self._notify_email = None
        self._pager_email = self._phone = self._post_code = self._state = None
        self._website = None
        self.uri = '/Contact/{}/'.format(self._nickname)
        if 'api' in kwargs:
            # Built from an API response dict: copy fields, no HTTP call
            del kwargs['api']
            for key, val in kwargs.items():
                if key != '_nickname':
                    setattr(self, '_' + key, val)
                else:
                    setattr(self, key, val)
            self.uri = '/Contact/{}/'.format(self._nickname)
        elif len(args) == 0 and len(kwargs) == 0:
            # Only a nickname was supplied: fetch the existing contact
            self._get()
        else:
            # Extra arguments mean we are creating a brand new contact
            self._post(*args, **kwargs)
    def _post(self, email, first_name, last_name, organization, address=None,
              address_2=None, city=None, country=None, fax=None,
              notify_email=None, pager_email=None, phone=None, post_code=None,
              state=None, website=None):
        """Create a new :class:`~dyn.tm.accounts.Contact` on the DynECT System
        """
        self._email = email
        self._first_name = first_name
        self._last_name = last_name
        self._organization = organization
        self._address = address
        self._address_2 = address_2
        self._city = city
        self._country = country
        self._fax = fax
        self._notify_email = notify_email
        self._pager_email = pager_email
        self._phone = phone
        self._post_code = post_code
        self._state = state
        self._website = website
        # Passes this object itself as the request payload -- presumably the
        # session serializes its fields into api args; TODO confirm against
        # DynectSession.execute
        response = DynectSession.get_session().execute(self.uri, 'POST', self)
        self._build(response['data'])
def _get(self):
"""Get an existing :class:`~dyn.tm.accounts.Contact` from the DynECT
System
"""
response = DynectSession.get_session().execute(self.uri, 'GET')
for key, val in response['data'].items():
setattr(self, '_' + key, val)
def _build(self, data):
for key, val in data.items():
setattr(self, '_' + key, val)
def _update(self, api_args=None):
"""Private update method which handles building this
:class:`~dyn.tm.accounts.Contact` object from the API JSON respnose
"""
response = DynectSession.get_session().execute(self.uri, 'PUT',
api_args)
self._build(response['data'])
@property
def nickname(self):
"""This :class:`~dyn.tm.accounts.Contact`'s DynECT System Nickname"""
return self._nickname
@nickname.setter
def nickname(self, value):
self._nickname = value
api_args = {'new_nickname': self._nickname}
self._update(api_args)
    # NOTE: each setter below pushes the new value to the DynECT System
    # immediately via self._update(), which also refreshes this object's
    # local fields from the API response.
    @property
    def email(self):
        """This :class:`~dyn.tm.accounts.Contact`'s DynECT System Email address
        """
        return self._email
    @email.setter
    def email(self, value):
        self._email = value
        api_args = {'email': self._email}
        self._update(api_args)
    @property
    def first_name(self):
        """The first name of this :class:`~dyn.tm.accounts.Contact`"""
        return self._first_name
    @first_name.setter
    def first_name(self, value):
        self._first_name = value
        api_args = {'first_name': self._first_name}
        self._update(api_args)
    @property
    def last_name(self):
        """The last name of this :class:`~dyn.tm.accounts.Contact`"""
        return self._last_name
    @last_name.setter
    def last_name(self, value):
        self._last_name = value
        api_args = {'last_name': self._last_name}
        self._update(api_args)
    @property
    def organization(self):
        """The organization this :class:`~dyn.tm.accounts.Contact` belongs to
        within the DynECT System
        """
        return self._organization
    @organization.setter
    def organization(self, value):
        self._organization = value
        api_args = {'organization': self._organization}
        self._update(api_args)
    @property
    def phone(self):
        """The phone number associated with this
        :class:`~dyn.tm.accounts.Contact`
        """
        return self._phone
    @phone.setter
    def phone(self, value):
        self._phone = value
        api_args = {'phone': self._phone}
        self._update(api_args)
    @property
    def address(self):
        """This :class:`~dyn.tm.accounts.Contact`'s street address"""
        return self._address
    @address.setter
    def address(self, value):
        self._address = value
        api_args = {'address': self._address}
        self._update(api_args)
    @property
    def address_2(self):
        """This :class:`~dyn.tm.accounts.Contact`'s street address, line 2"""
        return self._address_2
    @address_2.setter
    def address_2(self, value):
        self._address_2 = value
        api_args = {'address_2': self._address_2}
        self._update(api_args)
    @property
    def city(self):
        """This :class:`~dyn.tm.accounts.Contact`'s city"""
        return self._city
    @city.setter
    def city(self, value):
        self._city = value
        api_args = {'city': self._city}
        self._update(api_args)
    @property
    def country(self):
        """This :class:`~dyn.tm.accounts.Contact`'s Country"""
        return self._country
@country.setter
def country(self, value):
self._country = value
api_args = {'country': self._country}
self._update(api_args)
@property
def fax(self):
"""The fax number associated with this
:class:`~dyn.tm.accounts.Contact`
"""
return self._fax
@fax.setter
def fax(self, value):
self._fax = value
api_args = {'fax': self._fax}
self._update(api_args)
@property
def notify_email(self):
"""Email address where this :class:`~dyn.tm.accounts.Contact` should
receive notifications
"""
return self._notify_email
@notify_email.setter
def notify_email(self, value):
self._notify_email = value
api_args = {'notify_email': self._notify_email}
self._update(api_args)
@property
def pager_email(self):
"""Email address where this :class:`~dyn.tm.accounts.Contact` should
receive messages destined for a pager
"""
return self._pager_email
@pager_email.setter
def pager_email(self, value):
self._pager_email = value
api_args = {'pager_email': self._pager_email}
self._update(api_args)
@property
def post_code(self):
"""This :class:`~dyn.tm.accounts.Contacts`'s postal code, part of the
contacts's address
"""
return self._post_code
@post_code.setter
def post_code(self, value):
self._post_code = value
api_args = {'post_code': self._post_code}
self._update(api_args)
@property
def state(self):
"""This :class:`~dyn.tm.accounts.Contact`'s state"""
return self._state
@state.setter
def state(self, value):
self._state = value
api_args = {'state': self._state}
self._update(api_args)
@property
def website(self):
"""This :class:`~dyn.tm.accounts.Contact`'s website"""
return self._website
@website.setter
def website(self, value):
self._website = value
api_args = {'website': self._website}
self._update(api_args)
def delete(self):
"""Delete this :class:`~dyn.tm.accounts.Contact` from the Dynect System
"""
DynectSession.get_session().execute(self.uri, 'DELETE')
def __str__(self):
"""Custom str method"""
return force_unicode('<Contact>: {}').format(self.nickname)
__repr__ = __unicode__ = __str__
def __bytes__(self):
"""bytes override"""
return bytes(self.__str__())
| [
"[email protected]"
] | |
9158efa3c4789e93bed407cc64f865fb40b2f449 | db0817d46280a24bc73f2baf4d155ce848b081a7 | /tests/gg_test_api.py | 77d65c06034d439cee2fa1cc336273ee80933eff | [
"Apache-2.0"
] | permissive | chengentest1/HttpRunner | 12bf778a9bca4289c19ad833eb61e54f61070e3c | e107f05dac53920df493fa4d99f9a64bfb711187 | refs/heads/master | 2020-04-16T10:11:18.419759 | 2019-01-26T08:15:28 | 2019-01-26T08:15:28 | 165,494,531 | 0 | 0 | null | 2019-01-13T10:56:45 | 2019-01-13T10:56:45 | null | UTF-8 | Python | false | false | 468 | py | from httprunner.loader import load_folder_content, load_folder_files, load_api_folder
from httprunner import exceptions, logger
import os
# Windows-specific absolute path to the sample api-definition folder.
name_path='C:/Users/cheng/PycharmProjects/HttpRunner/tests/api'
# Load every testcase file under the folder into a dict and dump it.
r=load_folder_content(name_path)
print(r)
# print("=========")
# for api_file_path, api_items in r.items():
#     print(api_file_path)
#     print(type(api_items))
# t=load_folder_files(name_path)
# print(os.getcwd())
# print(t)
# Load the same folder through the api-specific loader for comparison.
g=load_api_folder(name_path)
print(g)
"[email protected]"
] | |
def chatConversation(modelname,condition,boughtyear,transmission,purchaseprice,saleprice,lowestsalprice):
    """Return a canned buyer/dealer chat about one car as a list of turns.

    Only ``modelname``, ``boughtyear``, ``transmission`` and ``saleprice``
    appear in the script; the remaining parameters are accepted but unused.
    """
    script = ("Hey!\n Hello sir, welcome.\n I am here to buy a car. Could you please help me with that?\n Sure, sir. We have "
              + modelname + " bought at year " + boughtyear
              + " \n Could you please brief about it?.\n Sure! It was bought in "
              + boughtyear + " and has transmission of " + transmission
              + ". Its in pretty good condition and very rare to find a car of this quality in the market.\n How much will this cost?\n Onroad price for this will be "
              + saleprice
              + "\n okay!\n Sure, sir. How about payment?\nI will make the complete payment by check.\n Sure, sir. We will get your car ready in a while.\n Ok!\n Thank You!\n Goodbye!")
    return script.split('\n')
| [
"[email protected]"
] | |
533646eaf481d05ece09b7e36c64575ff8465d6d | d91344eef0ab651bc9e8a99533ec7bb7667d9696 | /zodbtest.py | ff19c4756cbbb5e1e3c089e77b1de55089e55f93 | [] | no_license | jculpin/python-tutorial | 456a7fbb4dc0d626593dd4ea4920f4d85806b873 | 391ce0b9fcc09e0883c4ae0bf055afc0f75ebac7 | refs/heads/master | 2021-09-01T19:15:55.636225 | 2017-12-28T11:31:07 | 2017-12-28T11:31:07 | 109,722,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | import ZODB, ZODB.FileStorage
import BTrees.OOBTree
import persistent
import transaction
class Account(persistent.Persistent):
    """Minimal persistent bank account stored in ZODB."""
    def __init__(self):
        self.balance = 0.0
    def deposit(self, amount):
        self.balance += amount
    def cash(self, amount):
        # NOTE(review): `assert` is stripped under `python -O`, and `<`
        # (not `<=`) forbids withdrawing the exact balance — confirm intent.
        assert amount < self.balance
        self.balance -= amount
# Open (or create) a file-backed ZODB database in the working directory.
storage = ZODB.FileStorage.FileStorage('mydata.fs')
db = ZODB.DB(storage)
connection = db.open()
root = connection.root
# Keep accounts in a BTree so large collections stay efficient on disk.
root.accounts = BTrees.OOBTree.BTree()
root.accounts['account-1'] = Account()
# ZODB persists changes only on transaction commit.
transaction.commit()
| [
"[email protected]"
] | |
ac392da5ab9ef853b68f170160d29812ce05bd89 | b03708b101a0d0fd9a030728b7cdc0ba8459f7f8 | /022-pm25.py | bdd7c0bb416f3d1df21de42fc9b72c6477893773 | [] | no_license | xigyou/Tutorial | 53a4b63b9c1f52c78647a3f8376e6988dc106c7c | 9f9ca4199866707319c97422fa5de690c133f243 | refs/heads/master | 2020-03-27T07:08:26.609394 | 2018-05-25T02:08:29 | 2018-05-25T02:08:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | # -*- coding:utf-8 -*-
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from lxml import etree
def get_city_ls(url):
    """Fetch the AQI index page and return the de-duplicated list of
    per-city href links found under elements with class ``citychk``."""
    page = etree.HTML(requests.get(url).text)
    hrefs = page.xpath('//*[contains(concat( " ", @class, " " ), concat( " ", "citychk", " " ))]//a/@href')
    return list(set(hrefs))
def download_pm25(city_ls,path):
    """Append one CSV block of per-city AQI readings to *path*.

    Python 2 only: ``len(ls)/9`` relies on integer division (the scraped
    table has 9 cells per monitoring-station row).
    """
    # 'a' mode means the header row is re-written on every run.
    file = open(path,'a')
    file.writelines("监测点"+","+"AQI指数"+","+"空气质量状况"+","+"PM2.5"+","+"PM10"+","+"Co"+","+"No2"+","+"So2"+","+"O3"+"\n")
    for city in city_ls:
        city_url = "http://www.tianqihoubao.com" + city
        #print city_url
        res = requests.get(city_url)
        html = res.text
        res = etree.HTML(html)
        # Skip the first 9 cells (the table header row).
        tr = res.xpath('//td/text()')[9:]
        #print tr[0]
        ls = []
        for i in tr:
            data = i.strip()
            ls.append(data+',')
        # Emit one CSV line per 9-cell chunk.
        for i in range(0,len(ls)/9+1):
            file.writelines(ls[i*9:i*9+9])
            file.writelines("\n")
    file.close()
# Scrape the national AQI index page and dump every city's table to CSV.
if __name__ == "__main__":
    url = 'http://www.tianqihoubao.com/aqi'
    path = r'E:\pm25_all_2.csv'
    lst = get_city_ls(url)
    download_pm25(lst,path)
| [
"[email protected]"
] | |
3ee968cd58c3411ec79d84bd304b6c6ef7e8fef2 | 64d18f3d83f2e5fb84bdb0291370400841788aee | /iris.py | 559594882810c8adfbde1c3124ef4dd0b0c1422a | [] | no_license | KuffourJason/1-NN-Classifcation-with-Iris-Dataset | 61c06cbb4f73eacaaa2fe63ffb98b3a4bd73af04 | 73f8c8d21e556603af1a77b84d6519b02cb7163c | refs/heads/master | 2021-01-20T07:10:53.682692 | 2017-05-01T23:59:11 | 2017-05-01T23:59:11 | 89,973,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#loads iris data
iris_dataset = load_iris()
#prints all the keys in the data
print("Keys of iris_dataset: \n{}".format(iris_dataset.keys()))
#splits dataset into training and test data (75/25 split, fixed seed)
X_train, X_test, y_train, y_test = train_test_split(iris_dataset['data'], iris_dataset['target'], random_state=0)
#Data visualization: pair-wise scatter matrix colored by class label
iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names)
# pd.scatter_matrix was removed in pandas 1.0; use the pandas.plotting API.
pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o', hist_kwds={'bins': 20}, s=60, alpha=.8)
plt.show()
#Model building using k-nearest neighbors (1-NN)
knn = KNeighborsClassifier(n_neighbors=1)
#fitting the model
knn.fit(X_train, y_train)
#Evaluating the model on the held-out test set
accuracy = knn.score(X_test, y_test)
print("The accuracy of the model is " + str(accuracy) )
"[email protected]"
] | |
bef3090f74a5d9bd04a6954de1907752e9738ed3 | dae652a3ca1497af6bc049944e3457a7c9d6cb49 | /z5tracker/rulesets/rando_aa_v4/Location.py | de4b97dde411f11966b510904a22c8f12df70a47 | [
"MIT"
] | permissive | HarborneD/z5-tracker | 8ca09fc9a317dc2b66685275b6d257dd5f4b9553 | 4631b33cc6584efcbd8df7e7d635d6ff0b7064fe | refs/heads/master | 2022-02-21T21:38:05.217211 | 2019-06-25T17:19:48 | 2019-07-05T08:27:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,140 | py | from .LocationList import location_table
from enum import Enum
class Location(object):
    """A single item location in the world graph: placement state,
    access rules, and bookkeeping used by the fill algorithm."""
    def __init__(self, name='', address=None, address2=None, default=None, type='Chest', scene=None, hint='Termina', parent=None):
        self.name = name
        self.parent_region = parent
        self.item = None
        self.address = address
        self.address2 = address2
        self.default = default
        self.type = type
        self.scene = scene
        self.hint = hint
        self.spot_type = 'Location'
        self.recursion_count = 0
        self.staleness_count = 0
        # Rules default to "always allowed"; world setup tightens them later.
        self.access_rule = lambda state: True
        self.item_rule = lambda location, item: True
        self.locked = False
        self.price = None
        self.minor_only = False
        self.world = None
        self.disabled = DisableType.ENABLED
    def copy(self, new_region):
        """Copy this location into *new_region*, duplicating a placed item."""
        new_location = Location(self.name, self.address, self.address2, self.default, self.type, self.scene, self.hint, new_region)
        new_location.world = new_region.world
        if self.item:
            new_location.item = self.item.copy(new_region.world)
            new_location.item.location = new_location
        new_location.spot_type = self.spot_type
        new_location.access_rule = self.access_rule
        new_location.item_rule = self.item_rule
        new_location.locked = self.locked
        new_location.minor_only = self.minor_only
        new_location.disabled = self.disabled
        return new_location
    def can_fill(self, state, item, check_access=True):
        """True when *item* may be placed here (rules plus reachability)."""
        if self.minor_only and item.majoritem:
            return False
        return (
            not self.is_disabled() and
            self.can_fill_fast(item) and
            (not check_access or state.can_reach(self)))
    def can_fill_fast(self, item):
        # Rule checks only — skips the expensive reachability search.
        return (self.parent_region.can_fill(item) and self.item_rule(self, item))
    def can_reach(self, state):
        if not self.is_disabled() and \
            self.access_rule(state) and \
            state.can_reach(self.parent_region):
            return True
        return False
    def is_disabled(self):
        # PENDING behaves as disabled only while the location is locked.
        return (self.disabled == DisableType.DISABLED) or \
            (self.disabled == DisableType.PENDING and self.locked)
    def __str__(self):
        return str(self.__unicode__())
    def __unicode__(self):
        return '%s' % self.name
def LocationFactory(locations, world=None):
    """Build Location objects for each name in *locations*.

    Accepts a single name or a list of names; returns a single Location
    for a single name, otherwise a list. Raises KeyError for unknown names.
    """
    ret = []
    singleton = False
    if isinstance(locations, str):
        locations = [locations]
        singleton = True
    for location in locations:
        if location in location_table:
            type, scene, default, hint, addresses = location_table[location]
            if addresses is None:
                addresses = (None, None)
            address, address2 = addresses
            ret.append(Location(location, address, address2, default, type, scene, hint, ret))
        else:
            # Interpolate eagerly: the old code passed ('%s', location) as two
            # KeyError arguments and never formatted the name into the message.
            raise KeyError('Unknown Location: %s' % location)
    if singleton:
        return ret[0]
    return ret
# Tri-state disable flag: PENDING counts as disabled only while the
# location is also locked (see Location.is_disabled).
class DisableType(Enum):
    ENABLED = 0
    PENDING = 1
    DISABLED = 2
| [
"[email protected]"
] | |
c8673f26dbe2c39f5218a0b512d836550e57c683 | ec3d59db3210f8aed6b9a00eded9eff29a5dbfc5 | /backend/ravegenieApp/models/business.py | 2825569b8803014d9401d1bdc08ebbf64e08f0a5 | [] | no_license | Iamfocus/Ravegenieweb | 00f86e95afe8cf583298ab59dce3139602d6555f | 46b02146409589f0d7843d55a8edd6953a329fe8 | refs/heads/main | 2023-05-11T08:18:52.817113 | 2021-05-19T13:28:03 | 2021-05-19T13:28:03 | 347,775,447 | 0 | 0 | null | 2021-05-19T13:28:04 | 2021-03-14T23:01:20 | JavaScript | UTF-8 | Python | false | false | 2,051 | py | from django.db import models
from utils.models import ModelMixin
from django.utils import timezone
from datetime import timedelta
import time
class Business(models.Model, ModelMixin):
    """Business profile tracking balances and a 30-day 'exclusive'
    subscription window."""
    SUBSCRIPTION_DAYS = 30
    exclusive_start_date = models.DateTimeField(null=True)
    exclusive_end_date = models.DateTimeField(null=True)
    account_balance = models.DecimalField(default=0, decimal_places=2, max_digits=10)
    bonus_balance = models.DecimalField(default=0, decimal_places=2, max_digits=10)
    business_details = models.TextField(null=True, max_length=500)
    first_campaign_ref_paid = models.BooleanField(default=False)
    exclusive_spots_used = models.PositiveSmallIntegerField(default=0)
    class Meta:
        ordering = ['-id']
        # NOTE(review): these CheckConstraint objects are bare expressions —
        # they are never assigned to Meta.constraints, so no database-level
        # check is actually created. Confirm and move them into
        # `constraints = [...]` if they are meant to be enforced.
        models.CheckConstraint(check=models.Q(account_balance__gt=0), name='ensure_balance_is_gt_zero')
        models.CheckConstraint(check=models.Q(bonus_balance__gt=0), name='ensure_bonus_is_gt_zero')
    def reset_exclusive(self):
        # Clear the subscription window and the spot-usage counter.
        self.exclusive_end_date = None
        self.exclusive_start_date = None
        self.exclusive_spots_used = 0
    def is_exclusive(self):
        """True while 'now' is before the subscription end date."""
        if self.exclusive_end_date:
            return timezone.now() < self.exclusive_end_date
        return False
    def start_subscription(self):
        self.exclusive_start_date = timezone.now()
    def end_subscription(self):
        self.exclusive_start_date = None
        self.exclusive_end_date = None
    def _set_exclusive_end(self):
        # Derive the end date once, SUBSCRIPTION_DAYS after the start.
        if self.exclusive_start_date and not self.exclusive_end_date:
            days = self.__class__.SUBSCRIPTION_DAYS
            self.exclusive_end_date = self.exclusive_start_date + timedelta(days=days)
    def save(self, *args, **kwargs):
        self._set_exclusive_end()
        super().save(*args, **kwargs)
    def get_dict(self):
        """Merge the related user's dict with this business's fields."""
        business_data = {
            "exclusiveStartDate": self.get_time_string(self.exclusive_start_date),
            "exclusiveEndDate": self.get_time_string(self.exclusive_end_date),
            'isExclusive': self.is_exclusive(),
            'accountBalance': self.account_balance,
            'bonusBalance': self.bonus_balance,
        }
        user_data = self.user.get_dict()
        user_data.update(business_data)
        return user_data
"[email protected]"
] | |
81c26e3a0d8ed242955d3e50604d0bd97065e591 | 7fa4633ea229fc866cc99992bf9f891663d39ec6 | /common/experiment_manager/src/experiment_manager/msg/__init__.py | d4cd96b7b9dc0d1ccf630dd04ed765622acb87f2 | [] | no_license | onuryuruten/rossi-demo | f7aeccfea9228e13dbab85642426dce3de5ba925 | ab3ef04c40f66ec0883c26910c79cb0c72209051 | refs/heads/master | 2020-12-25T11:42:15.966155 | 2012-01-20T08:17:50 | 2012-01-20T08:17:50 | 3,162,374 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | from ._ExperimentState import *
| [
"[email protected]"
] | |
010582cd02fe383b3413bf2b655b36b3af22c368 | 7ba54b83de814cd34f0058e797cf3d6313057147 | /mmdetection/configs/xray/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_2x_nih_chestx_det.py | f3478d0c54101f9e68bb9062d6b3357c0452e8b7 | [] | no_license | TerryGriffin/COMP5300.AdvancedDeepLearning | 083344a939e99fe7e2119225e023ab8aebdda04e | 9521c3327ba6d8344711cd3e404e627af9ffc936 | refs/heads/master | 2023-02-01T10:06:00.355905 | 2020-12-16T02:05:12 | 2020-12-16T02:05:12 | 321,839,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | _base_ = './faster_rcnn_r50_fpn_2x_nih_chestx_det.py'
# Swap the base config's backbone for ResNeXt-101 (32x4d) with deformable
# convolutions enabled in stages c3-c5.
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        # DCN in stages 2-4; the first stage keeps plain convolutions.
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
| [
"[email protected]"
] | |
65ed5768bb3bdfc61f10d6cc6a59dfbb999c8d92 | 4a020c0a492d931f7da5c452c9569fba06703686 | /testing/web-platform/tests/webdriver/ecmascript/ecmascript_test.py | cf27c01c06a02063b1347f6673721aec1f88a454 | [
"LicenseRef-scancode-w3c-03-bsd-license",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rbernon/wine-gecko | 353173511a790127ffa2ad39d630b8a0dcbbf5bf | 550ad9eac229b769992f421ce9492ca46edabaa0 | refs/heads/master | 2023-08-06T21:25:26.836672 | 2020-11-30T12:47:56 | 2021-09-30T08:14:19 | 411,965,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
class EcmasScriptTest(base_test.WebDriverBaseTest):
    """WebDriver spec test: execute_script must return the page's value."""
    def test_that_ecmascript_returns_document_title(self):
        # Load the fixture page, then evaluate JS in the page context.
        self.driver.get(self.webserver.where_is("ecmascript/res/ecmascript_test.html"))
        result = self.driver.execute_script("return document.title;");
        self.assertEqual("ecmascript test", result);
if __name__ == "__main__":
    unittest.main()
| [
"[email protected]"
] | |
def perimeter(n):
    """Return the total perimeter of the first n+1 Fibonacci squares.

    Side lengths follow 1, 1, 2, 3, 5, ...; each square contributes
    4 * side, so the answer is 4 * sum(sides[0..n]).
    """
    sides = [1, 1]
    total = 2
    for idx in range(2, n + 1):
        sides.append(sides[idx - 1] + sides[idx - 2])
        total += sides[idx]
    return total * 4
| [
"[email protected]"
] | |
802f0f9c3488a45954dd123dd019302f69a2ac51 | 3e46282bb17323669753dffa65142f4e4484ac0e | /adminPage/webApp/views.py | aafca5f3e8f02ac2c0984f51d5e27ecfdcf04207 | [] | no_license | YeongBinByeon/django_log | a88c064101bb338795aafff89084415149cc797e | aa9ccdf94ab17166c9a80a5f0400396bd68fb92b | refs/heads/master | 2022-04-10T04:44:42.715199 | 2020-03-11T17:11:23 | 2020-03-11T17:11:23 | 246,173,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,568 | py | from django.shortcuts import render
import pandas as pd
from .models import *
import sqlite3
from django.http import JsonResponse
# Create your views here.
def utterance(request):
    """Import utterance rows from a local Excel file into the Utterance
    model, then render the test page.

    NOTE(review): hard-coded Windows path and debug prints — dev-only view.
    """
    print("hello")
    df = pd.read_excel(r'C:\code_folder\django_log\adminPage\data.xlsx')
    print(df['date'].iloc[0])
    print(df.index)
    # NOTE(review): this sqlite connection is opened but never used.
    con = sqlite3.connect("db.sqlite3")
    print(con)
    print(df)
    #df.to_sql('webApp_utterance', con)
    #print(df['date'].iloc[0])
    #print(df['tv_search_title'].iloc[0] )
    # One ORM insert per spreadsheet row.
    for i in df.index:
        Utterance(date=df['date'].iloc[i], language=df['language'].iloc[i], mainAction=df['mainAction'].iloc[i], utterance=df['utterance'].iloc[i]).save()
    #queryset = Utterance.objects.create(date=df['date'].iloc[0], tv_search_title=df['tv_search_title'].iloc[0], tv_unknown_search=df['tv_unknown_search'].iloc[0], tv_open_search=df['tv_open_search'].iloc[0], tv_play_title=df['tv_play_title'].iloc[0])
    return render(request, "webApp/test.html")
# Create your views here.
def utterance1(request):
    """Same as ``utterance`` but imports from data1.xlsx.

    NOTE(review): near-duplicate of ``utterance`` — only the file differs.
    """
    print("hello")
    df = pd.read_excel(r'C:\code_folder\django_log\adminPage\data1.xlsx')
    print(df['date'].iloc[0])
    print(df.index)
    # NOTE(review): this sqlite connection is opened but never used.
    con = sqlite3.connect("db.sqlite3")
    print(con)
    print(df)
    #df.to_sql('webApp_utterance', con)
    #print(df['date'].iloc[0])
    #print(df['tv_search_title'].iloc[0] )
    # One ORM insert per spreadsheet row.
    for i in df.index:
        Utterance(date=df['date'].iloc[i], language=df['language'].iloc[i], mainAction=df['mainAction'].iloc[i], utterance=df['utterance'].iloc[i]).save()
    #queryset = Utterance.objects.create(date=df['date'].iloc[0], tv_search_title=df['tv_search_title'].iloc[0], tv_unknown_search=df['tv_unknown_search'].iloc[0], tv_open_search=df['tv_open_search'].iloc[0], tv_play_title=df['tv_play_title'].iloc[0])
    return render(request, "webApp/test.html")
def saveKRMainAction(request):
    """Import KR main-action rows from a local Excel file into the
    KR_MainAction model, then render the test page."""
    df = pd.read_excel(r'C:\code_folder\django_log\adminPage\kr_mainAction.xlsx')
    # NOTE(review): this sqlite connection is opened but never used.
    con = sqlite3.connect("db.sqlite3")
    for i in df.index:
        KR_MainAction(date=df['date'].iloc[i], language=df['language'].iloc[i], mainAction=df['mainAction'].iloc[i], count=df['count'].iloc[i], rate=df['rate'].iloc[i]).save()
    return render(request, "webApp/test.html")
def readKRMainAction(request):
    """Return every KR_MainAction row as a JSON array.

    ``safe=False`` lets JsonResponse serialize a list instead of a dict.
    """
    querySet = KR_MainAction.objects.all()
    # Debug output only; the response uses .values() below.
    print(type(list(querySet.values())))
    for row in querySet.values_list():
        print(row)
    return JsonResponse(data=list(querySet.values()), safe=False)
def chartjs(request):
    """Render the Chart.js demo page."""
    return render(request, "webApp/chartjs.html")
| [
"[email protected]"
] | |
4504613d10c713611621a028b61bb44666f0a9f3 | 716abd9e5ba4b72b72cc5f724a6cc0a6ad4390d1 | /6-Operators of Python/30-Identity-and-Membership-operators.py | 1c0385bd6405585f5684b04ece645772b30e7ea3 | [] | no_license | devopstasks/PythonScripting | ac45edd72dc134ec3539b962f02dfc866f365ecf | 48bc37733ae6b3be4e2d64909ffe0962b6908518 | refs/heads/master | 2023-03-29T11:18:01.329452 | 2021-04-07T03:25:20 | 2021-04-07T03:25:20 | 350,388,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | '''
===================================
Identity operators are used to find the type of: class/type/object.
There are 2 types of Identity operators
- is
- is not
===================================
x=6
type(x) -> <class 'int'>
y="hi"
type(y) -> <class 'str'>
type(x) is type(y) -> False
type(x) is not type(y) -> True
'''
'''
====================================
Membership operators are used to validate the membership of a value.
There are 2 types of Membership operators
- in
- not in
====================================
x=[2,3,5,9]
3 in x -> True
4 in x -> False
'''
| [
"[email protected]"
] | |
8eab42ec4e999316d34dc5b305592808d36f835a | 9dc8c299ee7d4a225002127cc03b4253c8a721fd | /libs/simulator/livepush_acl_topic_simulator.py | 823dabe3f0baee30b5bbd03a10492cff39593daf | [] | no_license | namesuqi/strategy_corgi | 5df5d8c89bdf7a7c465c438048be20ef16120f4f | 557b8f8eabf034c2a57c25e6bc581858dd4f1b6e | refs/heads/master | 2020-03-07T04:00:18.313901 | 2018-03-29T07:50:50 | 2018-03-29T07:50:50 | 127,253,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | #!/usr/bin/python
# coding=utf-8
# livepush_acl report simulator
from libs.simulator.topic_simulator import *
from config import push_review_duration
from libs.module.live_push import *
# live_push report livepush_acl
class LivePushAclTopicSimulator(TopicSimulator):
def __init__(self, log):
super(LivePushAclTopicSimulator, self).__init__()
self.log = log
self.table_name = Live_Push
self.topic = TOPIC_PUSH_ACL
self.review_duration = push_review_duration
def create_topic_data(self, result, **kwargs):
return {"topic": self.topic,
"timestamp": int(time.time() * 1000),
"event": result.event,
"livepush_ip": result.ip,
"reason": result.reason
}
| [
"[email protected]"
] | |
2c918e376e381d5bd464137531852c689ae9df51 | 3d60dee9c6adac59ba7d8c1e8e3dc19bf309eaa5 | /dsa_starter/migrations/0003_auto_20170418_1538.py | 92579360bc98f85d9388cab15b0579ac84c3749c | [] | no_license | michelluther/table-top-server | 9d3eda8f9bb572884c1ed6e7a64052e93f52c95c | 476c33dd45f8baf89a24090520ba99c7de4386d5 | refs/heads/master | 2023-06-14T11:11:14.924642 | 2023-05-14T15:12:30 | 2023-05-14T15:12:30 | 89,470,476 | 2 | 0 | null | 2023-02-10T22:56:22 | 2017-04-26T10:51:02 | JavaScript | UTF-8 | Python | false | false | 1,014 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-18 13:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dsa_starter', '0002_character_name'),
]
operations = [
migrations.CreateModel(
name='Race',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(default='Mensch')),
],
),
migrations.AddField(
model_name='character',
name='type',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='character',
name='race',
field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to='dsa_starter.Race'),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
5e8052af8629684bb42632d743efaf5d48119ba1 | 1b94aae63500b6ff94b0446d01c3c9bee385fad2 | /.history/chandori/account/views_20210824172332.py | 4c4c5078aeec6d0af4357fc59d04853d7bbcc83d | [] | no_license | miracle3070/chandori | 71389c2a9df76c242a5895c2c23d4394220f9c8e | b01d1eaa1d9c0d12d7abdc8f164039bcd9c42925 | refs/heads/master | 2023-08-18T11:46:11.303934 | 2021-09-28T19:23:22 | 2021-09-28T19:23:22 | 393,949,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,675 | py | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserChangeForm
from django.utils import timezone
from .models import *
from .forms import CustomUserChangeForm
from django.contrib import messages
def edit(request):
    """Render the account-edit page.

    Fix: the template name must be a string — ``edit.html`` was a bare
    (undefined) name and raised NameError on every request. NOTE(review):
    this definition is shadowed by the fuller ``edit`` view below it.
    """
    return render(request, "edit.html")
def edit(request):
    """Update the logged-in user's profile via CustomUserChangeForm."""
    if request.method == 'POST':
        user_change_form = CustomUserChangeForm(request.POST, instance = request.user)
        if user_change_form.is_valid():
            user_change_form.save()
            messages.success(request, '회원정보가 수정되었습니다.')
            # Renders a template by on-disk path rather than app template name.
            return render(request, 'accounting/templates/base.html')
    else:
        # GET: show the form pre-filled with the current user's data.
        user_change_form = CustomUserChangeForm(instance = request.user)
    # NOTE(review): the template name below looks like a pasted import
    # statement ('from django.contrib import messages.html') — confirm the
    # real template path.
    return render(request, 'from django.contrib import messages.html', {'user_change_form':user_change_form})
def login_view(request):
    """Authenticate with the posted credentials.

    Redirects home on success; otherwise re-renders the login page with a
    Korean error message (empty fields, or authentication failure).
    """
    error_msg = ""
    if request.method == "POST":
        username = request.POST.get('username')
        password = request.POST.get('password')
        if username == "" or password == "":
            error_msg = "아이디 또는 비밀번호를 입력해주세요."
        else:
            user = authenticate(request, username=username, password=password)
            if user is None:
                error_msg = "아이디 또는 비밀번호가 틀렸습니다."
            else:
                login(request, user)
                return redirect("accounting:home")
    return render(request, "login.html", {"error_msg" : error_msg})
def logout_view(request):
    """Log the current user out and return to the accounting home page."""
    logout(request)
    return redirect("accounting:home")
def signup_view(request):
    """Register a new CustomUser from the posted signup form.

    Redirects to the login page on success; re-renders the signup page
    with a Korean error message when the two passwords differ.
    """
    error_msg = ""
    if request.method == "POST":
        password1 = request.POST["password1"]
        password2 = request.POST["password2"]
        if password1 == password2:
            username = request.POST["username"]
            nickname = request.POST["nickname"]
            # NOTE(review): int() raises ValueError on non-numeric input —
            # confirm the form validates age/income before this runs.
            age = int(request.POST['age'])
            job = request.POST['job']
            income = int(request.POST['income'])
            signup_date = timezone.now()
            # create_user hashes the password before saving.
            user = CustomUser.objects.create_user(
                username = username,
                password = password1,
                nickname = nickname,
                age = age,
                job = job,
                income = income,
                signup_date = signup_date,
            )
            return redirect("account:login")
        else:
            error_msg = "비밀번호가 일치하지 않습니다."
    return render(request, "signup.html", {"error_msg" : error_msg})
| [
"[email protected]"
] | |
d8f78b6e7b8e862a47c874db93883e8e260cfe9e | cb56ef1cb0315767859e1e0599692fec54143139 | /scripts/map_manager.py | 4f322efc0afe94b7425cb658dfdc68828d4e1579 | [] | no_license | ddiggins/MavROS-Drone-Control | e68a7aff8a89ad9ffb35894c13c66f2dbaa1214a | 05e9f26baeff7392c6021bf48aa950376830abfb | refs/heads/master | 2021-01-17T19:56:36.549515 | 2015-07-28T21:14:53 | 2015-07-28T21:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,477 | py | #!/usr/bin/env python
import urllib2
import cv2
import numpy as np
import rospy
from geometry_msgs.msg import Polygon, Point32
import copy
import math
WINDOW_NAME = "Map"
MAP_HEIGHT = 640
ZOOM = 19
class MapManager(object):
    """Fetch a Google static-map tile and convert between window pixel
    coordinates and latitude/longitude, tracking user-plotted waypoints.

    Python 2 code (urllib2, print statements, old OpenCV constants).
    """
    # Google Static Maps endpoint (pre-API-key era URL).
    BASE_URL = "http://maps.googleapis.com/maps/api/staticmap"
    def __init__(self, map_height, zoom, lat, lon):
        self.map_height = map_height
        self.zoom = zoom
        self.static_map = self.make_map_request(lat, lon)
        # Working copy that gets drawn on; reset from static_map to erase.
        self.img = copy.copy(self.static_map)
        self.center_lat = lat
        self.center_lon = lon
        # (lat, lon) tuples the user has clicked.
        self.plotted_points = []
    def make_map_request(self, lat, lon):
        """Request a square satellite tile and decode the PNG bytes into an
        OpenCV image array."""
        lat = "%s" % lat
        lon = "%s" % lon
        params = (self.BASE_URL, lat, lon, self.zoom, self.map_height, self.map_height)
        full_url = "%s?center=%s,%s&zoom=%s&size=%sx%s&sensor=false&maptype=satellite" % params
        response = urllib2.urlopen(full_url)
        png_bytes = np.asarray([ord(char) for char in response.read()], dtype=np.uint8)
        cv_array = cv2.imdecode(png_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
        return cv_array
    @property
    def degrees_in_map(self):
        '''
        This logic is based on the idea that zoom=0 returns 360 degrees.
        Returns (degrees of latitude, degrees of longitude) spanned by the
        map; longitude is scaled by cos(latitude).
        '''
        #return (self.map_height / 256.0) * (360.0 / pow(2, self.zoom))
        # 111319.9 m per degree of latitude at the equator.
        deg_lat = self.linear_meters_in_map/111319.9
        scale = math.cos(self.center_lat*math.pi/180)
        deg_lon = deg_lat/scale
        return (deg_lat, deg_lon)
    def degrees_to_meters(self, degrees):
        # Converts latitude degrees to meters using the equator length.
        equator_length_km = 40008
        km_per_degree = equator_length_km / 360.0
        m_per_degree = km_per_degree * 1000
        return degrees * m_per_degree
    @property
    def linear_meters_in_map(self):
        # Empirical Google Maps scale constant divided by 2^(zoom+3).
        meters_in_map = 591657550.500000 / pow(2, self.zoom+3)
        return meters_in_map
    def _window_x_y_to_grid(self, x, y):
        '''
        converts graphical x, y coordinates to grid coordinates
        where (0, 0) is the very center of the window
        '''
        center_x = center_y = self.map_height / 2
        new_x = x - center_x
        new_y = -1 * (y - center_y)
        return new_x, new_y
    def _grid_x_y_to_window(self, x, y):
        # Inverse of _window_x_y_to_grid.
        center_x = center_y = self.map_height / 2
        new_x = center_x + x
        new_y = center_y - y
        return new_x, new_y
    def x_y_to_lat_lon(self, x, y):
        """Convert a window pixel position to (lat, lon)."""
        grid_x, grid_y = self._window_x_y_to_grid(x, y)
        offset_x_degrees = (float(grid_x) / self.map_height) * self.degrees_in_map[1]
        offset_y_degrees = (float(grid_y) / self.map_height) * self.degrees_in_map[0]
        # lat = y, lon = x
        return self.center_lat + offset_y_degrees, self.center_lon + offset_x_degrees
    def lat_lon_to_x_y(self, lat, lon):
        '''
        Returns x, y coordinates where (0, 0) is the top left
        '''
        offset_lat_degrees = lat - self.center_lat
        offset_lon_degrees = lon - self.center_lon
        grid_x = (offset_lon_degrees / self.degrees_in_map[1]) * self.map_height
        grid_y = (offset_lat_degrees / self.degrees_in_map[0]) * self.map_height
        window_x, window_y = self._grid_x_y_to_window(grid_x, grid_y)
        return int(window_x), int(window_y)
    def mouse_callback(self, event, x, y, flag=0, param=None):
        """Left click plots a waypoint; right click removes the last one."""
        if event == cv2.EVENT_LBUTTONDOWN:
            lat, lon = self.x_y_to_lat_lon(x, y)
            self.plot_point(lat, lon)
            print lat, lon
        if event == cv2.EVENT_RBUTTONDOWN:
            self.plotted_points.pop(-1)
            # Reset the drawing surface so the removed point disappears.
            self.img = copy.copy(self.static_map)
    def plot_point(self, lat, lon):
        self.plotted_points.append((lat, lon,))
    def get_plotted_points_as_x_y_list(self):
        '''
        returns plotted lat, lon points as drawable (x, y) window coordinates
        '''
        return [self.lat_lon_to_x_y(*tuple_inst) for tuple_inst in self.plotted_points]
# initialize the map
def start_map():
starting_coords = (42.293173, -71.263540) # my apartment...
manager = MapManager(MAP_HEIGHT, ZOOM, starting_coords[0], starting_coords[1])
# BGR colors
RED = cv2.cv.Scalar(0, 0, 255)
YELLOW = cv2.cv.Scalar(0, 180, 180)
cv2.namedWindow(WINDOW_NAME, cv2.CV_WINDOW_AUTOSIZE)
cv2.cv.SetMouseCallback(WINDOW_NAME, manager.mouse_callback)
rospy.init_node('map_manager')
pub = rospy.Publisher('google_waypoints', Polygon, queue_size=10)
r = rospy.Rate(10)
polygon = Polygon()
if rospy.is_shutdown():
print 'borked'
while not rospy.is_shutdown():
points = manager.get_plotted_points_as_x_y_list()
for i in range(len(points)):
cv2.circle(manager.img, center=points[i], radius=5, color=RED, thickness=-1)
if i > 0:
cv2.line(manager.img, pt1=points[i-1], pt2=points[i], color=YELLOW, thickness = 2)
cv2.imshow(WINDOW_NAME, manager.img)
r.sleep()
polygon.points = []
for location in manager.plotted_points:
point = Point32()
point.x = location[0]
point.y = location[1]
point.z = 0
polygon.points.append(point)
pub.publish(polygon)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows
cv2.waitKey(1)
print manager.plotted_points
#while not (cv2.waitKey(1) & 0xFF == ord('m')):
# pass
#self.plotted_points = []
if __name__ == '__main__':
    # Launch the interactive map UI when executed as a script.
    start_map()
"[email protected]"
] | |
d4e017fae28241185b5f46eaf8b7922f77b7cfcc | ca6975b6038e47e1b3e34d188039e96f1c695fc1 | /ListComprehension.py | db6a35087aace7efecf59d66b703963b85783158 | [] | no_license | jonesbf/PythScripto | d528f911a9dd0bb444e66113dc78e163a4f2541e | db325dd66ba785f85b9090ac7eb04c2735513bd3 | refs/heads/master | 2021-01-02T09:19:21.342917 | 2014-09-17T21:49:41 | 2014-09-17T21:49:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | doubles_by_3 = [x*2 for x in range(1,6) if (x*2) % 3 == 0]
# Complete the following line. Use the line above for help.
# Squares of 1..10 whose base is even (so the square is even too).
even_squares = [x ** 2 for x in range(1,11) if x % 2 == 0]
print even_squares
# Cubes of 1..10 whose cube is divisible by 4.
cubes_by_four = [x ** 3 for x in range(1,11) if (x ** 3) % 4 == 0]
print cubes_by_four
"[email protected]"
] | |
b7346b389ff30c566e8bc6651ae60b10c67ebf2b | 0e3462bd127c2072b34ac4885c034bde58ac8063 | /c_sharp_code_rewritten_in_python/transformer_command_line_interface.py | 7e6cb49274f86f384976fe5a54132f56ddee81b0 | [] | no_license | SNeicer/py_regex_translator_core | 608b3b6e5d4aff1496a910d38276562f25013cb7 | ea45e1eb556285f20f702ae6bd697aebd6f0efe6 | refs/heads/master | 2023-02-12T04:49:03.063797 | 2021-01-08T14:06:15 | 2021-01-08T14:06:15 | 326,164,216 | 0 | 0 | null | 2021-01-02T11:26:50 | 2021-01-02T11:04:11 | Python | UTF-8 | Python | false | false | 331 | py | from c_sharp_code_rewritten_in_python import interfaces
class TransformerCLI:
    """Thin command-line front end that delegates to an IFileTransformer."""

    def __init__(self, transformer: interfaces.IFileTransformer):
        self._transformer = transformer

    def run(self, *args):
        """Transform args[0] (source path) into args[1] (target path)."""
        source, target = args[0], args[1]
        self._transformer.transform(source, target)
| [
"[email protected]"
] | |
060486b8f89bc30164c00394137fae03c866a6b2 | 6471745f084ab85b814850af6f697607c456e7ad | /data_stream.py | f8f8e93d041f9fc483cf031fa28360ce5ea89755 | [] | no_license | YoshiyukiKono/Streaming_P2_SF-Crime-Statistics-w-Spark-Streaming | 8065bae6c73500142f37bc0a466f110c026fc821 | 163120454532bea090518fec272b789c7d42f900 | refs/heads/master | 2021-01-09T11:34:45.992602 | 2020-03-08T06:54:44 | 2020-03-08T06:54:44 | 242,285,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,068 | py | import logging
import json
from pyspark.sql import SparkSession
from pyspark.sql.types import *
import pyspark.sql.functions as psf
# Spark
from pyspark import SparkContext
# Spark Streaming
from pyspark.streaming import StreamingContext
from pyspark.sql.functions import col
from pyspark.sql.functions import udf
import datetime
# TODO Create a schema for incoming resources
# Schema of the incoming service-call JSON records read from Kafka.
# Every field is declared nullable (third StructField argument is True).
schema = StructType([
    StructField("crime_id", StringType(), True),
    StructField("original_crime_type_name", StringType(), True),
    StructField("report_date", DateType(), True),
    StructField("call_date", DateType(), True),
    StructField("offense_date", DateType(), True),
    StructField("call_time", StringType(), True),
    StructField("call_date_time", TimestampType(), True),
    StructField("disposition", StringType(), True),
    StructField("address", StringType(), True),
    StructField("city", StringType(), True),
    StructField("state", StringType(), True),
    StructField("agency_id", StringType(), True),
    StructField("address_type", StringType(), True),
    StructField("common_location", StringType(), True)
])
#https://stackoverflow.com/questions/48305443/typeerror-column-object-is-not-callable-using-withcolumn/50805490
#@udf
def to_yyyymmddhh(timestamp):
    """Format a datetime down to hour granularity as 'YYYY/MM/DD HH'."""
    return timestamp.strftime("%Y/%m/%d %H")
# Register the formatter as a Spark SQL UDF; the default return type
# (StringType) is used -- the TimestampType return type was abandoned.
to_yyyymmddhh_udf = udf(to_yyyymmddhh)#, TimestampType())
def run_spark_job(spark):
    """Build and run the structured-streaming job.

    Reads SF police service calls from Kafka, joins them with the radio-code
    lookup table, aggregates counts per (crime type, disposition, hour) with a
    60-minute watermark, and streams the result to the console.

    Blocks in join_query.awaitTermination(); the triple-quoted blocks below
    are earlier query variants kept as dead code by the original author.
    """
    # TODO Create Spark Configuration
    df = spark \
        .readStream \
        .format("kafka") \
        .option("kafka.bootstrap.servers", "localhost:9092") \
        .option("subscribe", "police-department-calls-for-service") \
        .option("startingOffsets", "earliest") \
        .option("maxOffsetsPerTrigger", 10) \
        .option("stopGracefullyOnShutdown", "true") \
        .option("failOnDataLoss", "false") \
        .load()

    # Show schema for the incoming resources for checks
    df.printSchema()

    # TODO extract the correct column from the kafka input resources
    # Take only value and convert it to String
    kafka_df = df.selectExpr("CAST(value AS STRING)")

    # Parse the JSON payload against the module-level `schema`.
    service_table = kafka_df\
        .select(psf.from_json(psf.col('value'), schema).alias("DF"))\
        .select("DF.*").alias("service")

    # TODO select original_crime_type_name and disposition
    distinct_table = service_table\
        .select("original_crime_type_name", "disposition", "call_date_time")

    # count the number of original crime type
    #
    #agg_df = service_table\
    # NOTE(review): this aggregation is built but never written out below --
    # only the joined query at the end is started; confirm that is intended.
    agg_df = distinct_table\
        .select("original_crime_type_name", "disposition", "call_date_time")\
        .withColumn('call_date_hour', to_yyyymmddhh_udf(service_table.call_date_time))\
        .withWatermark("call_date_time", '60 minutes')\
        .groupBy("original_crime_type_name", "disposition", 'call_date_hour')\
        .count()

    # TODO Q1. Submit a screen shot of a batch ingestion of the aggregation
    # TODO write output stream
    #query = agg_df \
    #query = service_table \
    """
    query = distinct_table \
        .writeStream \
        .format("console") \
        .queryName("Micro Batch") \
        .trigger(processingTime="20 seconds") \
        .option("checkpointLocation", "/tmp/checkpoint") \
        .start()
    query.awaitTermination()
    query = agg_df \
        .writeStream \
        .trigger(processingTime="30 seconds") \
        .outputMode('Update') \
        .format('console') \
        .option("truncate", "false") \
        .start()
    """
    # https://knowledge.udacity.com/questions/72289
    #.outputMode('Update')
    """"
    query = agg_df \
        .writeStream \
        .format("console") \
        .queryName("Micro Batch") \
        .trigger(processingTime="60 seconds") \
        .option("checkpointLocation", "/tmp/checkpoint") \
        .outputMode('Update') \
        .start()
    """
    # TODO attach a ProgressReporter
    #query.awaitTermination()

    # TODO get the right radio code json path
    radio_code_json_filepath = "radio_code.json"
    radio_code_df = spark.read.json(radio_code_json_filepath, multiLine=True).alias("radio_code")
    radio_code_df.printSchema()

    # clean up your data so that the column names match on radio_code_df and agg_df
    # we will want to join on the disposition code

    # TODO rename disposition_code column to disposition
    radio_code_df = radio_code_df.withColumnRenamed("disposition_code", "disposition")

    # TODO join on disposition column
    #join_query = distinct_table\
    #    .join(radio_code_df, distinct_table.disposition == distinct_table.disposition, 'inner')\

    # Confirmed to work
    """
    join_query = distinct_table\
        .join(radio_code_df, "disposition", 'left_outer')\
        .writeStream \
        .format("console") \
        .queryName("Join Query") \
        .trigger(processingTime="10 seconds") \
        .option("checkpointLocation", "/tmp/checkpoint") \
        .outputMode('append') \
        .start()
    """

    # Confirmed to work
    #join_distinct_query = distinct_table\
    #    .join(radio_code_df, "disposition", 'left_outer')

    # The query that actually runs: left-join with the radio-code descriptions,
    # then the same windowed count as agg_df, streamed to the console.
    join_query = distinct_table\
        .join(radio_code_df, "disposition", 'left_outer')\
        .select("original_crime_type_name", "disposition","description", "call_date_time")\
        .withColumn('call_date_hour', to_yyyymmddhh_udf(service_table.call_date_time))\
        .withWatermark("call_date_time", '60 minutes')\
        .groupBy("original_crime_type_name", "disposition","description",'call_date_hour')\
        .count()\
        .writeStream \
        .format("console") \
        .queryName("Join Query") \
        .trigger(processingTime="10 seconds") \
        .option("checkpointLocation", "/tmp/checkpoint") \
        .outputMode('update') \
        .start()

    join_query.awaitTermination()
if __name__ == "__main__":
    logger = logging.getLogger(__name__)

    # TODO Create Spark in Standalone mode
    # Local standalone session using all available cores.
    spark = SparkSession \
        .builder \
        .master("local[*]") \
        .appName("KafkaSparkStructuredStreaming") \
        .getOrCreate()

    # Earlier session/StreamingContext variants kept as dead code.
    """
    spark = SparkSession \
        .builder \
        .config("spark.streaming.blockInterval","50ms") \
        .config("spark.streaming.receiver.maxRate", "0") \
        .master("local[*]") \
        .appName("KafkaSparkStructuredStreaming") \
        .getOrCreate()
    """
    """
    sc = SparkContext(appName="KafkaSparkStructuredStreaming")
    ssc = StreamingContext(sc, 1)
    spark = SparkSession \
        .builder \
        .master("local[*]") \
        .config(conf=ssc)\
        .getOrCreate()
    """

    logger.info("Spark started")

    # Quiet Spark's own logging, but keep the micro-batch progress at INFO.
    spark.sparkContext.setLogLevel("WARN")
    logging.getLogger("log4j.logger.org.apache.spark.sql.execution.streaming.MicroBatchExecution")\
        .setLevel(logging.INFO)

    run_spark_job(spark)
    #run_spark_job(ssc)

    spark.stop()
"[email protected]"
] | |
4bb21d2b7264dfd557b883070ce76da58e45731a | e90f8bd31d0f6eaa444824da2144200fbdb1e487 | /runtimelib/tests/grid/wrapping/wrapping.test | 08f93b2cab459bc77a8cc4cfd2c49e0e99d8ec03 | [] | no_license | stonea/gridweaver | 22e57286a5db8c6b60eee242147c6ef71f0c9929 | f3843b0b10d37c2f44df053c0a7e960de87ea1c5 | refs/heads/main | 2023-07-04T22:08:58.160006 | 2021-02-15T16:42:40 | 2021-02-15T16:42:40 | 338,865,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | test | #!/usr/bin/python
import commands
import os
import time
# NOTE(review): `global` at module level is a no-op in Python; presumably this
# script is exec'd inside a test-harness function where the declarations bind
# to harness state -- confirm against the GridWeaver test runner.
global passed
global processes
# Harness-provided helpers (not defined in this file): rebuild the test
# binaries, then run the `wrapping` program as the "wrapping" test case.
rebuild()
runProg("./wrapping", "wrapping")
| [
"[email protected]"
] | |
c6fbb46e513e8c7d8eb126f21a81ecaac39f4d26 | 2bf43ccdcef26b6dce1c16d8058b5495a776c4dc | /proxy_service/urls.py | f5c1beb947f1c3f474c877c5d041108c45b1e500 | [] | no_license | xuhaomin/webservice_for_pachong | 7c2dc15b072064931081fa7f91c432da62e3c8a1 | 23c8ebd8cbaafb56c2f8396a809fc2e3fd896ba3 | refs/heads/master | 2021-08-26T09:44:58.090704 | 2017-11-23T03:19:28 | 2017-11-23T03:19:28 | 111,656,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | """pachong_webservice URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
# Proxy-pool endpoints: 'get...' hands out a proxy, 'add...' registers one.
# NOTE(review): the patterns are anchored only at the start, so any path
# beginning with 'get'/'add' matches -- confirm that is intended.
urlpatterns = [
    url(r'^get', views.get_proxy, name='getproxy'),
    url(r'^add', views.add_proxy, name='addproxy'),
]
"[email protected]"
] | |
da27329e32caa8569890e76264621a933e09077b | ece123467d071eb3e993760fd53ee86ab03f33f1 | /tutorapp/chat.py | 9584b1432fa52b2110a79e1c4bf97078409f818a | [
"MIT"
] | permissive | wann100/tutorapp | 69411bfb20795b2af7a4e44031180a82069e6230 | 27da5846cc36776c8d48d0db828c7e362b140da2 | refs/heads/master | 2020-03-28T20:26:25.031422 | 2018-09-17T05:09:30 | 2018-09-17T05:09:30 | 149,070,636 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,770 | py | #!/usr/bin/env python
"""
The Chat Handlers
Copyright 2010 Netgamix LLC
License: http://netgamix.com/information/terms/
"""
import header
class MainHandler(webapp.RequestHandler):
    """Serves the landing page, surfacing any error stored in the session."""

    def get(self):
        self.session = Session()
        # Show the last error (e.g. "nickname taken") if one was recorded.
        if 'error' in self.session:
            error = self.session['error']
        else:
            error = ""
        page_path = os.path.join(os.path.dirname(__file__), 'templates/main.html')
        rendered = template.render(page_path, {'error': error})
        self.response.out.write(rendered)
class ChatHandler(webapp.RequestHandler):
    """Handles joining the chat: validates the nickname, creates a Channel API
    channel for the user, and renders the chat page with message history."""

    def get(self):
        # The chat page is only reachable via POST; bounce GETs to the lobby.
        self.redirect('/')

    def post(self):
        # Some session from http://gaeutilities.appspot.com/
        self.session = Session()
        # obtain the nick
        nick = self.request.get('nick')
        if not nick:
            self.redirect('/')
            # Bug fix: without this return the handler kept executing with an
            # empty nick after scheduling the redirect.
            return
        # check if a user with that nick already exists
        # NOTE(review): this uses the bare name OnlineUser while the save path
        # below uses model.OnlineUser -- presumably both resolve to the same
        # class via the `header` import; confirm.
        user = OnlineUser.all().filter('nick =', nick).get()
        if user:
            self.session['error']='That nickname is taken'
            self.redirect('/')
            return
        else:
            self.session['error']=''
        # generate a unique id for the channel api
        channel_id=str(uuid.uuid4())
        chat_token = channel.create_channel(channel_id)
        # save the user
        user = model.OnlineUser(nick=nick,channel_id=channel_id)
        user.put()
        # obtain all the messages
        messages=model.Message.all().order('date').fetch(1000)
        # generate the template and answer back to the user
        template_vars={'nick':nick,'messages':messages,'channel_id':channel_id,'chat_token':chat_token}
        temp = os.path.join(os.path.dirname(__file__),'templates/chat.html')
        outstr = template.render(temp, template_vars)
        self.response.out.write(outstr)
class NewMessageHandler(webapp.RequestHandler):
    """Receives a chat message, persists it, and fans it out to every
    connected user over the Channel API."""

    def post(self):
        # Pull the message text and the sender's channel id from the request.
        text = self.request.get('text')
        channel_id = self.request.get('channel_id')
        q = db.GqlQuery("SELECT * FROM OnlineUser WHERE channel_id = :1", channel_id)
        nick = q.fetch(1)[0].nick
        now = datetime.datetime.now()
        message = model.Message(user=nick, text=strip_tags(text), date=now,
                                date_string=now.strftime("%H:%M:%S"))
        message.put()
        # Render just the new message through the shared messages template.
        tmpl_path = os.path.join(os.path.dirname(__file__), 'templates/messages.html')
        rendered = template.render(tmpl_path, {'messages': [message]})
        channel_msg = json.dumps({'success': True, "html": rendered})
        # Broadcast the rendered snippet to all connected users.
        for user in model.OnlineUser.all().fetch(100):
            channel.send_message(user.channel_id, channel_msg)
class ClearDBHandler(webapp.RequestHandler):
    """Cleanup endpoint: deletes users that never opened their channel socket
    and have been around for more than two minutes."""

    def get(self):
        q = OnlineUser.all().filter('opened_socket =', False)
        users = q.fetch(1000)
        for user in users:
            # Bug fix: timedelta.seconds wraps at one day, so a user created
            # e.g. 24h1m ago reported 60 seconds and escaped deletion; use
            # total_seconds() for the true elapsed time.
            if (datetime.datetime.now() - user.creation_date).total_seconds() > 120:
                db.delete(user)
| [
"[email protected]"
] | |
a22c702811b62c295c71fa1fb4f4aff77ee8108e | 9188d0d7ce9fc5fadf4d2593741894e1448f9326 | /indico/vendor/django_mail/backends/locmem.py | 4e3f2ecda085f3995cb3fbe9698f0f8bd4f91571 | [
"MIT"
] | permissive | vaclavstepan/indico | b411410416acdfa50b0d374f89ec8208de00fb2f | 8ca1ac4d4a958f22f24580a790b3cb015570bdfb | refs/heads/master | 2023-07-21T04:42:03.031131 | 2021-09-01T09:54:17 | 2021-09-01T09:54:17 | 385,897,420 | 0 | 0 | MIT | 2021-07-16T13:07:32 | 2021-07-14T10:16:57 | null | UTF-8 | Python | false | false | 1,481 | py | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
# The code in here is taken almost verbatim from `django.core.mail.backends.locmem`,
# which is licensed under the three-clause BSD license and is originally
# available on the following URL:
# https://github.com/django/django/blob/stable/3.1.x/django/core/mail/backends/locmem.py
# Credits of the original code go to the Django Software Foundation
# and their contributors.
"""
Backend for test environment.
"""
from indico.vendor import django_mail
from .base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
    """
    An email backend for use during test sessions.

    Nothing is sent over the wire: messages are collected in a dummy outbox
    (the module-level ``django_mail.outbox`` list) where tests can inspect
    them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Create the shared outbox lazily, on first backend instantiation.
        if not hasattr(django_mail, 'outbox'):
            django_mail.outbox = []

    def send_messages(self, messages):
        """Redirect messages to the dummy outbox and return how many were stored."""
        delivered = 0
        for email in messages:
            # Building the MIME object validates the headers before "sending".
            email.message()
            django_mail.outbox.append(email)
            delivered += 1
        return delivered
| [
"[email protected]"
] | |
f6b37a366b054c1e431aefa17fb9943058075c76 | 0b0f4c15abb143a4b24b3bb01b9a6184df6867bf | /hello_requests/yes_or_no.py | f8b0ac04aff02b0e85c9cd9e2d08c68386c67414 | [] | no_license | saida93522/hello_requests_python | efe15130cbb327a78d3ef1aa8d6e43fdaf339709 | 76dc533dea9e1b1413d04b3fc20c52987139b780 | refs/heads/master | 2023-05-02T20:43:30.906319 | 2020-10-16T20:00:32 | 2020-10-16T20:00:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | import requests
import logging
class APIError(Exception):
    """Raised when the yes/no API is unreachable or returns unusable data."""
def yes_or_no():
    """Query https://yesno.wtf/api/ and return its 'answer' field.

    Raises:
        APIError: on connection failure or non-2xx status, on a non-JSON
            payload, or on a payload missing the expected 'answer' key.
    """
    try:
        response = requests.get('https://yesno.wtf/api/')
        response.raise_for_status()
    except Exception as e:
        logging.exception(e)
        # Consistency fix: chain the cause (`from e`) like the handlers below,
        # so the original traceback is preserved in the raised APIError.
        raise APIError('Error connecting to API') from e

    try:
        data = response.json()
    except Exception as e:
        logging.exception(e)
        raise APIError('Data returned is not JSON') from e

    try:
        answer = data['answer']
    except Exception as e:
        logging.exception(e)
        raise APIError('JSON does not contain expected data') from e

    return answer
def main():
    """Print the API's yes/no answer, or the APIError message on failure."""
    try:
        answer = yes_or_no()
    except APIError as err:
        (message,) = err.args
        print(message)
    else:
        print(answer)
if __name__ == '__main__':
    # Script entry point: print the yes/no answer (or the error message).
    main()
"[email protected]"
] | |
80f1054d62723c8c77724d8030855559e25913d2 | 87eda1daa7c4ae0d5ad653a76d227d2f74a3c4a6 | /memn2n/memn2n/memn2n.py | 92a280b721b277fbdbf941cbd8b7397b72ae20e2 | [
"MIT"
] | permissive | xxxJenxxx/MEMn2n | 15ec3a2ca3c578948a106e9d477b8c2cfb7e187f | 340bdfa1e8fc41c097509ff8d06cd2c6d709112b | refs/heads/master | 2020-04-26T09:49:56.464233 | 2019-03-02T16:16:47 | 2019-03-02T16:16:47 | 173,469,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,348 | py | """End-To-End Memory Networks.
The implementation is based on http://arxiv.org/abs/1503.08895 [1]
"""
from __future__ import absolute_import
from __future__ import division
import tensorflow as tf
import numpy as np
from six.moves import range
def position_encoding(sentence_size, embedding_size):
    """
    Position Encoding described in section 4.1 [1].

    Returns a (sentence_size, embedding_size) float32 array. The last word
    position is forced to all ones so that time words pass through unchanged.
    """
    enc = np.ones((embedding_size, sentence_size), dtype=np.float32)
    mid_e = (embedding_size + 1) / 2
    mid_s = (sentence_size + 1) / 2
    for row in range(embedding_size):
        for col in range(sentence_size):
            # 1-based indices centered on the middle of each dimension.
            enc[row, col] = ((row + 1) - mid_e) * ((col + 1) - mid_s)
    enc = 1 + 4 * enc / embedding_size / sentence_size
    # Make position encoding of time words identity to avoid modifying them.
    enc[:, -1] = 1.0
    return np.transpose(enc)
def zero_nil_slot(t, name=None):
    """
    Overwrites the nil_slot (first row) of the input Tensor with zeros.

    The nil slot is a dummy embedding row that must not be trained, so its
    row in the (gradient) tensor is replaced with a zero row.
    """
    with tf.name_scope(name, "zero_nil_slot", [t]) as scope:
        tensor = tf.convert_to_tensor(t, name="t")
        width = tf.shape(tensor)[1]
        zero_row = tf.zeros(tf.stack([1, width]))
        remainder = tf.slice(tensor, [1, 0], [-1, -1])
        return tf.concat(axis=0, values=[zero_row, remainder], name=scope)
def add_gradient_noise(t, stddev=1e-3, name=None):
    """
    Adds gradient noise as described in http://arxiv.org/abs/1511.06807 [2].

    Returns `t` plus zero-mean Gaussian noise; `t` should be a gradient.
    0.001 was said to be a good fixed stddev for memory networks [2].
    """
    with tf.name_scope(name, "add_gradient_noise", [t, stddev]) as scope:
        grad = tf.convert_to_tensor(t, name="t")
        noise = tf.random_normal(tf.shape(grad), stddev=stddev)
        return tf.add(grad, noise, name=scope)
class MemN2N(object):
    """End-To-End Memory Network (adjacent weight sharing, TF1 graph mode)."""
    def __init__(self, batch_size, vocab_size, sentence_size, memory_size, embedding_size,
        session,
        hops,
        max_grad_norm,
        nonlin=None,
        initializer=tf.random_normal_initializer(stddev=0.1),
        encoding=position_encoding,
        name='MemN2N'):
        """Creates an End-To-End Memory Network

        Args:
            batch_size: The size of the batch.
            vocab_size: The size of the vocabulary (should include the nil word). The nil word
                one-hot encoding should be 0.
            sentence_size: The max size of a sentence in the data. All sentences should be padded
                to this length. If padding is required it should be done with nil one-hot encoding (0).
            memory_size: The max size of the memory. Since Tensorflow currently does not support jagged arrays
                all memories must be padded to this length. If padding is required, the extra memories should be
                empty memories; memories filled with the nil word ([0, 0, 0, ......, 0]).
            embedding_size: The size of the word embedding.
            session: Tensorflow Session the model is run with.
            hops: The number of hops. A hop consists of reading and addressing a memory slot.
            max_grad_norm: Maximum L2 norm clipping value.
            nonlin: Optional callable applied to each hop output u_k. Defaults to `None`.
            initializer: Weight initializer. Defaults to `tf.random_normal_initializer(stddev=0.1)`.
            encoding: A function returning a 2D Tensor (sentence_size, embedding_size). Defaults to `position_encoding`.
            name: Name of the End-To-End Memory Network. Defaults to `MemN2N`.

        Note: the optimizer is a GradientDescentOptimizer driven by the
        `learning_rate` placeholder fed in batch_fit (the docstring used to
        claim an Adam default that never existed in this code).
        """
        self._batch_size = batch_size
        self._vocab_size = vocab_size
        self._sentence_size = sentence_size
        self._memory_size = memory_size
        self._embedding_size = embedding_size
        self._hops = hops
        self._max_grad_norm = max_grad_norm
        self._nonlin = nonlin
        self._init = initializer
        self._name = name

        self._build_inputs()
        self._build_vars()

        self._opt = tf.train.GradientDescentOptimizer(learning_rate=self._lr)

        self._encoding = tf.constant(encoding(self._sentence_size, self._embedding_size), name="encoding")

        # Stash sizes in collections so a restored graph can recover them.
        #savestuff //something wrong with it
        tf.add_to_collection('_vocab_size', self._vocab_size)
        tf.add_to_collection('_sentence_size', self._sentence_size)
        tf.add_to_collection('_memory_size', self._memory_size)

        # cross entropy
        logits = self._inference(self._stories, self._queries) # (batch_size, vocab_size)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=tf.cast(self._answers, tf.float32), name="cross_entropy")
        cross_entropy_sum = tf.reduce_sum(cross_entropy, name="cross_entropy_sum")

        # loss op
        loss_op = cross_entropy_sum

        # gradient pipeline: clip each gradient's L2 norm, add Gaussian noise,
        # and zero out the nil-word row of the embedding gradients.
        grads_and_vars = self._opt.compute_gradients(loss_op)
        grads_and_vars = [(tf.clip_by_norm(g, self._max_grad_norm), v) for g,v in grads_and_vars]
        grads_and_vars = [(add_gradient_noise(g), v) for g,v in grads_and_vars]
        nil_grads_and_vars = []
        for g, v in grads_and_vars:
            if v.name in self._nil_vars:
                nil_grads_and_vars.append((zero_nil_slot(g), v))
            else:
                nil_grads_and_vars.append((g, v))
        train_op = self._opt.apply_gradients(nil_grads_and_vars, name="train_op")
        print("predict")
        # predict ops
        predict_op = tf.argmax(logits, 1, name="predict_op")
        predict_proba_op = tf.nn.softmax(logits, name="predict_proba_op")
        predict_log_proba_op = tf.log(predict_proba_op, name="predict_log_proba_op")

        # assign ops
        self.loss_op = loss_op
        self.predict_op = predict_op
        self.predict_proba_op = predict_proba_op
        self.predict_log_proba_op = predict_log_proba_op
        self.train_op = train_op

        init_op = tf.global_variables_initializer()
        self._sess = session
        self._sess.run(init_op)

    def _build_inputs(self):
        """Define the feed placeholders for stories, queries, answers and lr."""
        self._stories = tf.placeholder(tf.int32, [None, self._memory_size, self._sentence_size], name="stories")
        self._queries = tf.placeholder(tf.int32, [None, self._sentence_size], name="queries")
        self._answers = tf.placeholder(tf.int32, [None, self._vocab_size], name="answers")
        self._lr = tf.placeholder(tf.float32, [], name="learning_rate")

    def _build_vars(self):
        """Create the embedding matrices (row 0 is the untrainable nil word)."""
        with tf.variable_scope(self._name):
            nil_word_slot = tf.zeros([1, self._embedding_size])
            A = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._vocab_size-1, self._embedding_size]) ])
            C = tf.concat(axis=0, values=[ nil_word_slot, self._init([self._vocab_size-1, self._embedding_size]) ])
            self.A_1 = tf.Variable(A, name="A")
            self.C = []
            for hopn in range(self._hops):
                with tf.variable_scope('hop_{}'.format(hopn)):
                    self.C.append(tf.Variable(C, name="C"))
            # Dont use projection for layerwise weight sharing
            # self.H = tf.Variable(self._init([self._embedding_size, self._embedding_size]), name="H")
            # Use final C as replacement for W
            # self.W = tf.Variable(self._init([self._embedding_size, self._vocab_size]), name="W")
        # Variables whose nil-word gradient row must be zeroed during training.
        self._nil_vars = set([self.A_1.name] + [x.name for x in self.C])

    def _inference(self, stories, queries):
        """Run the multi-hop memory read and return (batch, vocab) logits."""
        with tf.variable_scope(self._name):
            # Use A_1 for the question embedding as per Adjacent Weight Sharing
            q_emb = tf.nn.embedding_lookup(self.A_1, queries)
            u_0 = tf.reduce_sum(q_emb * self._encoding, 1)
            u = [u_0]
            for hopn in range(self._hops):
                if hopn == 0:
                    m_emb_A = tf.nn.embedding_lookup(self.A_1, stories)
                    m_A = tf.reduce_sum(m_emb_A * self._encoding, 2)
                else:
                    with tf.variable_scope('hop_{}'.format(hopn - 1)):
                        m_emb_A = tf.nn.embedding_lookup(self.C[hopn - 1], stories)
                        m_A = tf.reduce_sum(m_emb_A * self._encoding, 2)

                # hack to get around no reduce_dot
                u_temp = tf.transpose(tf.expand_dims(u[-1], -1), [0, 2, 1])
                dotted = tf.reduce_sum(m_A * u_temp, 2)

                # Calculate probabilities
                probs = tf.nn.softmax(dotted)

                probs_temp = tf.transpose(tf.expand_dims(probs, -1), [0, 2, 1])
                with tf.variable_scope('hop_{}'.format(hopn)):
                    m_emb_C = tf.nn.embedding_lookup(self.C[hopn], stories)
                m_C = tf.reduce_sum(m_emb_C * self._encoding, 2)

                c_temp = tf.transpose(m_C, [0, 2, 1])
                o_k = tf.reduce_sum(c_temp * probs_temp, 2)

                # Dont use projection layer for adj weight sharing
                # u_k = tf.matmul(u[-1], self.H) + o_k
                u_k = u[-1] + o_k

                # nonlinearity
                if self._nonlin:
                    # Bug fix: previously called the bare, undefined name
                    # `nonlin` (NameError whenever a nonlinearity was given);
                    # apply the callable stored on the instance instead.
                    u_k = self._nonlin(u_k)

                u.append(u_k)

            # Use last C for output (transposed)
            with tf.variable_scope('hop_{}'.format(self._hops)):
                return tf.matmul(u_k, tf.transpose(self.C[-1], [1,0]))

    def batch_fit(self, stories, queries, answers, learning_rate):
        """Runs the training algorithm over the passed batch

        Args:
            stories: Tensor (None, memory_size, sentence_size)
            queries: Tensor (None, sentence_size)
            answers: Tensor (None, vocab_size)
        Returns:
            loss: floating-point number, the loss computed for the batch
        """
        feed_dict = {self._stories: stories, self._queries: queries, self._answers: answers, self._lr: learning_rate}
        loss, _ = self._sess.run([self.loss_op, self.train_op], feed_dict=feed_dict)
        return loss

    def predict(self, stories, queries):
        """Predicts answers as one-hot encoding.

        Args:
            stories: Tensor (None, memory_size, sentence_size)
            queries: Tensor (None, sentence_size)
        Returns:
            answers: Tensor (None, vocab_size)
        """
        feed_dict = {self._stories: stories, self._queries: queries}
        return self._sess.run(self.predict_op, feed_dict=feed_dict)

    def predict_proba(self, stories, queries):
        """Predicts probabilities of answers.

        Args:
            stories: Tensor (None, memory_size, sentence_size)
            queries: Tensor (None, sentence_size)
        Returns:
            answers: Tensor (None, vocab_size)
        """
        feed_dict = {self._stories: stories, self._queries: queries}
        return self._sess.run(self.predict_proba_op, feed_dict=feed_dict)

    def predict_log_proba(self, stories, queries):
        """Predicts log probabilities of answers.

        Args:
            stories: Tensor (None, memory_size, sentence_size)
            queries: Tensor (None, sentence_size)
        Returns:
            answers: Tensor (None, vocab_size)
        """
        feed_dict = {self._stories: stories, self._queries: queries}
        return self._sess.run(self.predict_log_proba_op, feed_dict=feed_dict)
| [
"[email protected]"
] | |
2e1a4eced854d9a4bdaf560370d10aa7bad3ffb8 | e529ad7e07468431239ea3bbf41fb829e75809e7 | /multi_model_train/__init__.py | 6cccaa1342c2e05c579fbf7b4f98a7d596070a99 | [] | no_license | khmurakami/multi_model_train_dataflow | 43671f3d0c425855e61c5046429066e748df54ff | 9a401191379ce42e819a9e49777282faf71cdd6a | refs/heads/master | 2022-01-14T04:56:47.876667 | 2019-05-19T02:31:09 | 2019-05-19T02:31:09 | 186,052,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | from .multi_model_train import MultiModelTrain
from .utils import return_json_file, read_parameters_json
| [
"[email protected]"
] | |
e28dde26a9b8013a59ef26b099fadafd5b640635 | c39ae20751f8a81ae6ca5dd1a486c19cb69627c0 | /Database/Big_Query/Creation_scripts/cumulative_returns.py | 262b080471ce87ddc38e345289dd77937548aed1 | [] | no_license | Ranga94/dax_project | 34d73ee05947ee561eeb613fd9b6e5a5ac9b971a | 7898570207ac99e6ff1d4fb5b2f51bdb7ced4733 | refs/heads/master | 2021-05-15T05:24:37.700366 | 2018-02-28T12:24:17 | 2018-02-28T12:24:17 | 117,215,872 | 0 | 0 | null | 2018-01-12T08:38:06 | 2018-01-12T08:38:06 | null | UTF-8 | Python | false | false | 1,425 | py | from google.cloud import bigquery
import sys
def create_table(dataset_id, table_id, project):
    """Create the cumulative-returns table in the given BigQuery dataset.

    Args:
        dataset_id: dataset that will contain the new table.
        table_id: name for the new table.
        project: GCP project id used by the BigQuery client. It is a required
            positional here (the old docstring wrongly claimed a fallback to
            the currently active project).
    """
    bigquery_client = bigquery.Client(project=project)
    dataset_ref = bigquery_client.dataset(dataset_id)
    table_ref = dataset_ref.table(table_id)
    table = bigquery.Table(table_ref)

    # Set the table schema
    table.schema = (
        bigquery.SchemaField('Constituent', 'STRING','REQUIRED'),
        bigquery.SchemaField('Constituent_id', 'STRING','REQUIRED'),
        bigquery.SchemaField('Constituent_name', 'STRING','REQUIRED'),
        bigquery.SchemaField('Cumulative_return_consistency_score','INTEGER','NULLABLE'),
        bigquery.SchemaField('Date_of_analysis','STRING','NULLABLE'),
        bigquery.SchemaField('From_date','STRING','REQUIRED'),
        bigquery.SchemaField('Status','STRING','NULLABLE'),
        bigquery.SchemaField('Table','STRING','NULLABLE'),
        bigquery.SchemaField('To_date','STRING','REQUIRED'),
        bigquery.SchemaField('one_year_return','FLOAT','REQUIRED'),
        bigquery.SchemaField('six_months_return','FLOAT','REQUIRED'),
        bigquery.SchemaField('three_years_return','FLOAT','REQUIRED'),
    )

    table = bigquery_client.create_table(table)
    print('Created table {} in dataset {}.'.format(table_id, dataset_id))
if __name__ == '__main__':
    # Usage: python cumulative_returns.py <dataset_id> <table_id>
    # The GCP project id is hard-coded to "igenie-project".
    create_table(sys.argv[1],sys.argv[2],"igenie-project")
"[email protected]"
] | |
9c0ff87bb7893a27a6f2ce9b6a089f64ba40a399 | ecb86f922de930e08e945f5a4b5758e13841fe1d | /find_empty_region.py | 301d55e84495c1b6325944a09af28db51328ff85 | [] | no_license | azharul/misc_problems | b5e8fb0f1660a10399bfb47e9d1eed933938806b | 4c8706c348a39aab28f2ff0962695ffd93b553a7 | refs/heads/master | 2021-01-12T08:15:06.177939 | 2017-01-22T03:40:50 | 2017-01-22T03:40:50 | 76,521,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#Given a sorted array [0-99] With input: [1, 5, 45, 86] Write a function that prints the empty regions, example Output: “0,2-4,6-44,46-85,87-99”
# solution using iteration
def find_empty_region(arr):
    # Print the index of every empty (None) slot, one per line.
    # NOTE(review): the problem statement above asks for range output like
    # "0,2-4,6-44" over a sorted value array, but this implementation instead
    # walks an array with None holes and prints individual indices -- confirm
    # which behavior is actually wanted.
    for i in range(len(arr)):
        if arr[i] is None:
            print i

a=[None,1,None,None,4,5,None,None,None,9,None,None,None,None,None,15]
find_empty_region(a)
| [
"[email protected]"
] | |
192f5be2ea74ff5dd2215cd23fc641a35c5f9e09 | 3715df2c833919376a3ee44de8fc64d1c2abe8ce | /AutomlCore/build/lib/algorithms/classification/naive_bayes_complement.py | f057f2f40891211ae4bcc2e4da46ea7a74226a34 | [] | no_license | mindis/dachshund | 6c07f8eb6b9f75f66c74ec3748dfa42a31cff4d5 | 2f1b3e5866e06424c700f3a106051fe69bcb18bc | refs/heads/master | 2022-12-09T05:17:00.812348 | 2020-09-03T05:25:09 | 2020-09-03T05:25:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | import numpy as np
from sklearn.naive_bayes import ComplementNB
from sklearn.metrics import accuracy_score
from hyperopt import hp
from utils import definitions
from .. import model, model_classification
class ComplementNBClassifier(model.Model, model_classification.ModelClassification):
    """AutoML wrapper around scikit-learn's Complement Naive Bayes classifier."""

    def __init__(self, _project_name):
        super().__init__(_project_name)
        # Registry name used by the AutoML framework to identify this model.
        self.model_name = 'NaiveBayesComplement'
        self.params_list = {}

    def getHyperParameterSpace(self):
        """Return the hyperopt search space for ComplementNB's hyperparameters."""
        space = {}
        space['alpha'] = hp.uniform('alpha', 0, 100)
        space['norm'] = [False, True]
        return space

    def getModel(self, _params):
        """Build an (unfitted) ComplementNB estimator from a sampled parameter set."""
        kwargs = {
            'alpha': _params['alpha'],
            'norm': _params['norm'],
        }
        return ComplementNB(**kwargs)

    def trainModel(self, x, y, _params):
        """Fit a fresh estimator on (x, y), keep it on self.model and persist it."""
        estimator = self.getModel(_params)
        estimator.fit(x, y)
        self.model = estimator
        self.saveModel()

    def getPredictResult(self, x):
        """Return hard class predictions for x."""
        return self.model.predict(x)

    def getPredictProbaResult(self, x):
        """Return per-class probability predictions for x."""
        return self.model.predict_proba(x)
| [
"[email protected]"
] | |
d712585cbc9c1e8c1e0865c28d6b449e189940a6 | 16a4d94fd018f5483f6cfca4e4d06fb4b7fff9b4 | /manage.py | aff974ebdf0e4ade18b7aa6d5d6fefe45a9a7d65 | [] | no_license | upfind/BlogFlask | b304daaf7de10324c39913c6d87d1c832f0f36fc | 21007415d32c585b052494e10426e8b06c38db4b | refs/heads/master | 2020-03-10T04:31:33.992697 | 2018-04-17T01:26:27 | 2018-04-17T01:26:27 | 129,194,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from BlogFlask import app
from exts import db
from models import User, Question, Answer
# Flask-Script manager: exposes command-line commands for this application.
manager = Manager(app)
# Bind the Flask app and the SQLAlchemy db instance to Flask-Migrate.
migrate = Migrate(app, db)
# Register the migration commands under the "db" sub-command of the manager
# (e.g. "python manage.py db migrate").
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
| [
"[email protected]"
] | |
c7cee1ce7b74766902666791db0f8f0747fb6459 | aabe7008e0eb77617f1a76cddb98e4b17fd5ce27 | /examples/model_compress/pruning/v2/movement_pruning_glue.py | a8365d9834075161641b7946bb41b2c961cfe190 | [
"MIT"
] | permissive | penghouwen/nni | a09a374a81be46fe246c425275585d5fe79404af | 2e6a2fd2df0d5700cb028b25156bb535a3fc227a | refs/heads/master | 2021-12-21T14:02:32.228973 | 2021-12-13T16:54:39 | 2021-12-13T16:54:39 | 435,926,123 | 1 | 0 | MIT | 2021-12-07T15:09:36 | 2021-12-07T15:09:35 | null | UTF-8 | Python | false | false | 4,715 | py | import functools
from tqdm import tqdm
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from datasets import load_metric, load_dataset
from transformers import (
BertForSequenceClassification,
BertTokenizerFast,
DataCollatorWithPadding,
set_seed
)
from nni.algorithms.compression.v2.pytorch.pruning import MovementPruner
# Maps each GLUE task name to its input text field(s); the second key is
# None for single-sentence tasks.
task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Number of micro-batches whose gradients are accumulated before each
# optimizer step (emulates a larger effective batch size).
gradient_accumulation_steps = 16
# A fake criterion: the huggingface model output already carries the loss,
# so we simply unwrap it; ``target`` is ignored.
def criterion(input, target):
    """Return the loss precomputed inside the model output object."""
    loss = input.loss
    return loss
def trainer(model, optimizer, criterion, train_dataloader):
    """Run one training pass over ``train_dataloader`` with gradient accumulation.

    Gradients from ``gradient_accumulation_steps`` consecutive batches are
    accumulated before each optimizer step, emulating a larger batch size.
    Relies on the module-level ``device``, ``gradient_accumulation_steps``,
    ``evaluator``, ``metric``, ``is_regression`` and ``validate_dataloader``.
    """
    model.train()
    optimizer.zero_grad()
    counter = 0
    for batch in tqdm(train_dataloader):
        counter += 1
        batch.to(device)
        outputs = model(**batch)
        # pruner may wrap the criterion, for example, loss = origin_loss + norm(weight), so call criterion to get loss here
        loss = criterion(outputs, None)
        # scale so the accumulated gradient equals the average over the group
        loss = loss / gradient_accumulation_steps
        loss.backward()
        if counter % gradient_accumulation_steps == 0 or counter == len(train_dataloader):
            optimizer.step()
            # BUG FIX: clear gradients only after a step. The original called
            # optimizer.zero_grad() at the top of every batch, which wiped the
            # gradients accumulated over previous micro-batches and defeated
            # gradient accumulation entirely.
            optimizer.zero_grad()
        if counter % 16000 == 0:
            # Print an intermediate evaluation every 1000 optimizer steps.
            print('Step {}: {}'.format(counter // gradient_accumulation_steps, evaluator(model, metric, is_regression, validate_dataloader)))
def evaluator(model, metric, is_regression, eval_dataloader):
    """Evaluate ``model`` on ``eval_dataloader`` and return the metric result dict."""
    model.eval()
    # Disable autograd during evaluation: no gradients are needed here, and
    # tracking them would waste memory and time.
    with torch.no_grad():
        for batch in tqdm(eval_dataloader):
            batch.to(device)
            outputs = model(**batch)
            # Regression tasks emit a single logit (squeeze); classification
            # takes the argmax over class logits.
            predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
            metric.add_batch(
                predictions=predictions,
                references=batch["labels"],
            )
    return metric.compute()
if __name__ == '__main__':
    # Task configuration: prune BERT fine-tuned on the GLUE MNLI task.
    task_name = 'mnli'
    is_regression = False
    # MNLI has 3 classes; other classification tasks have 2; regression has 1.
    num_labels = 1 if is_regression else (3 if task_name == 'mnli' else 2)
    train_batch_size = 8
    eval_batch_size = 8
    set_seed(1024)
    tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased')
    sentence1_key, sentence2_key = task_to_keys[task_name]
    # used to preprocess the raw data
    def preprocess_function(examples):
        # Tokenize the texts
        args = (
            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(*args, padding=False, max_length=128, truncation=True)
        if "label" in examples:
            # In all cases, rename the column to labels because the model will expect that.
            result["labels"] = examples["label"]
        return result
    raw_datasets = load_dataset('glue', task_name, cache_dir='./data')
    processed_datasets = raw_datasets.map(preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names)
    train_dataset = processed_datasets['train']
    # MNLI ships two validation splits; use the "matched" one.
    validate_dataset = processed_datasets['validation_matched' if task_name == "mnli" else 'validation']
    # Pad dynamically per batch instead of padding every example to max length.
    data_collator = DataCollatorWithPadding(tokenizer)
    train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=train_batch_size)
    validate_dataloader = DataLoader(validate_dataset, collate_fn=data_collator, batch_size=eval_batch_size)
    metric = load_metric("glue", task_name)
    model = BertForSequenceClassification.from_pretrained('bert-base-cased', num_labels=num_labels).to(device)
    # Baseline score of the unpruned pretrained model.
    print('Initial: {}'.format(evaluator(model, metric, is_regression, validate_dataloader)))
    # Prune 90% of the Linear layers inside the BERT encoder.
    config_list = [{'op_types': ['Linear'], 'op_partial_names': ['bert.encoder'], 'sparsity': 0.9}]
    p_trainer = functools.partial(trainer, train_dataloader=train_dataloader)
    optimizer = Adam(model.parameters(), lr=2e-5)
    pruner = MovementPruner(model, config_list, p_trainer, optimizer, criterion, training_epochs=10,
                            warm_up_step=3000, cool_down_beginning_step=27000)
    _, masks = pruner.compress()
    pruner.show_pruned_weights()
    print('Final: {}'.format(evaluator(model, metric, is_regression, validate_dataloader)))
    # Fine-tune the pruned model for one more epoch with a fresh optimizer.
    optimizer = Adam(model.parameters(), lr=2e-5)
    trainer(model, optimizer, criterion, train_dataloader)
    print('After 1 epoch finetuning: {}'.format(evaluator(model, metric, is_regression, validate_dataloader)))
| [
"[email protected]"
] | |
f4b07d056b6b0304d6a97622d9ff3ea596b95948 | 61e98b0302a43ab685be4c255b4ecf2979db55b6 | /sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/input/func_e0604.py | d077f31adbfa8f55fc8501b1293f76262c0e2dfc | [
"Apache-2.0",
"BSD-3-Clause",
"EPL-2.0",
"CDDL-1.0",
"WTFPL",
"GPL-2.0-only",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.1",
"Classpath-exception-2.0"
] | permissive | dzenyu/kafka | 5631c05a6de6e288baeb8955bdddf2ff60ec2a0e | d69a24bce8d108f43376271f89ecc3b81c7b6622 | refs/heads/master | 2021-07-16T12:31:09.623509 | 2021-06-28T18:22:16 | 2021-06-28T18:22:16 | 198,724,535 | 0 | 0 | Apache-2.0 | 2019-07-24T23:51:47 | 2019-07-24T23:51:46 | null | UTF-8 | Python | false | false | 206 | py | """Test for invalid objects in a module's __all__ variable.
"""
# pylint: disable=R0903,R0201,W0612
__revision__ = 0
def some_function():
"""Just a function."""
pass
__all__ = [some_function]
| [
"[email protected]"
] | |
0ea2deb0e70b0c11108b74b5f630dc873fde2ce4 | 5c9c08083658095eff1bc727e677c843a1c989b4 | /consumerproj4/venv/bin/wheel | 8202fdf8932400837a2223937e543b77bcd47609 | [] | no_license | xc150/project4 | 61d987a1e7a5dbf43597f3bca744c7c3ebedbf6c | ec92d399c470fad3f73d7d47acc98651bff478c5 | refs/heads/master | 2022-04-21T18:37:40.645727 | 2020-04-23T22:16:24 | 2020-04-23T22:16:24 | 258,323,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | #!/home/ec2-user/environment/consumerproj4/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
4162505f9cfb68cb567a576ee510ee61c4fafcea | 9acdfa61fc1408631482d0ac11c5dedd79ee32ef | /djangoApp/pca/urls.py | 4346aca879a4c3edcf4ead806011a223cf96870c | [] | no_license | helbendak/FinalProject | fea950344ccef7a97929233a8461dd0240761202 | ee7747011cf441d8bd5cd62b8660a7a81fe04a94 | refs/heads/master | 2020-04-25T18:47:46.606861 | 2019-05-13T00:18:15 | 2019-05-13T00:18:15 | 172,996,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | """finalProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
# URL routes for the pca app: /plotpca/ is served by the plotPCA view.
urlpatterns = [
    path('plotpca/', views.plotPCA, name='plotpca'),
]
| [
"[email protected]"
] | |
49f14e2117e3a266eb4b1adb9bc98e67bb907ac8 | 35165133d0cfb40117708f4b7f0f9f751508e115 | /back-end/chem-backend/General/Self_Assess_Answer_Generation/Data.py | 63d5633677ce7d98a1668b83b281512923354759 | [] | no_license | Chief97/edubot | 4297ea48278a2a8beb2ba7717df0704f627acce1 | 90ca0d3bbf3224c31868cb1831996a78076ddc10 | refs/heads/main | 2023-04-24T07:20:21.202870 | 2021-05-20T03:06:17 | 2021-05-20T03:06:17 | 365,427,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,366 | py | allSentenceAndQuestion = [ [
"But if the mass of very small particles such as a carbon @dash molecule, or a helium atom is given in units like kilogram or gram, the value obtained is extremely small",
"Is if the mass of very small particles such as a carbon dioxide molecule, or a helium atom given in units like kilogram or gram, the value obtained extremely small?",
],
[
"To measure the mass of items like a motor car, a brick, a loaf bread, a tea spoonful of sugar and a tablet of medicine, units like kilo @dash , @dash and milli @dash can be used",
"Can to measure the mass of items like a motor car, a brick, a loaf bread, a tea spoonful of sugar and a tablet of medicine, units like kilogram, gram and milligram be used?"
],
[
"In the past, the @dash of an atom of hydrogen, the lightest element was used as the atomic @dash unit"
],
[
"The mass so expressed is known as the relative @dash mass",
"Is the mass so expressed known as the relative atomic mass?",
"What so expressed is known as the relative atomic mass?"
],
[
"The mass of the unit relative to which the masses of other atoms are expressed is called the @dash mass unit",
"Are the mass of the unit relative to which the masses of other atoms expressed is called the atomic mass unit?",
"What is called the atomic mass unit?"
],
[
"The relative atomic mass is not the true mass of an atom of an @dash ",
"Is the relative atomic mass not the true mass of an atom of an element?",
"What is not the true mass of an atom of an element?"
],
[
"Since many elements are reactive, their @dash do not exist as free @dash ",
"Are since many elements reactive, their atoms do not exist as free atoms?"
],
[
" @dash are composed of molecules formed by the combination of atoms belonging to different elements",
"Are compounds composed of molecules formed by the combination of atoms belonging to different elements?"
],
[
"They exist naturally as molecules formed by joining @dash or more atoms of them"
],
[
"When a mass of any element equal to its relative atomic mass is taken in grams, it is seen that it contains the same @dash of atoms irrespective of the element",
"Is when a mass of any element equal to its relative atomic mass taken in grams, it seen that it contains the same number of atoms irrespective of the element?"
],
[
"In such compounds what is calculated as the relative molecular mass is the mass relevant to their empirical @dash ",
"Is in such compounds what calculated as the relative molecular mass the mass relevant to their empirical formula?",
"What is the mass relevant to their empirical formula?"
],
[
"After the great scientist @dash , this constant number is called Avogadro Constant"
],
[
"Similarly, it can also be shown that when a mass of any @dash equal to its relative molecular mass is taken in grams, it contains the same number of molecules",
"Can , it also be shown that when a mass of any substance equal to its relative molecular mass is taken in grams, it contains the same number of molecules?"
],
[
"It is known as the relative @dash mass or @dash mass",
"Is it known as the relative formula mass or formula mass?"
],
[
"Similarly 'ream' is used to measure the @dash of papers",
"Is 'ream' used to measure the amount of papers?"
],
[
"dozen of books means @dash books",
"Does dozen of books means 12 books?",
"What means 12 books?"
],
[
"Thus, the relative atomic mass of any element taken in grams contains @dash mole of atoms ",
"What contains one mole of atoms ?"
],
[
"In the @dash unit system, the unit used to measure the amount of a substance is the mole",
"Is in the si unit system, the unit used to measure the amount of a substance the mole?"
],
[
"The relative molecular mass of any substance taken in grams contains @dash mole of molecules ",
"What contains one mole of molecules ?"
],
[
"Therefore, the @dash mole is practically used to measure the amounts of things such as atoms, molecules and ions which exist in very large numbers",
"Is the unit mole practically used to measure the amounts of things such as atoms, molecules and ions which ext in very large numbers?",
" What is practically used to measure the amounts of things such as atoms, molecules and ions which exist in very large numbers?"
],
[
"mole of an element or a compound that exists as @dash means a mole of @dash of them"
],
[
"In order to have a mole of molecules of a given compound, its relative molecular mass has to be weighed out in @dash "
],
[
"Therefore, other @dash are used to measure the mole",
"Why is other methods are used to measure the mole?",
"Are other methods used to measure the mole?",
" What are used to measure the mole?"
],
[
" @dash method to have a mole of atoms of an element is weighing out its relative atomic mass in grams",
"Is one method to have a mole of atoms of an element weighing out its relative atomic mass in grams?",
"What is weighing out its relative atomic mass in grams?"
],
[
"As the number of units belonging to a mole is very @dash counting is impossible",
"Is as the number of units belonging to a mole very large, counting impossible?"
],
[
"Molar mass is the mass of a @dash of any substance",
"Is molar mass the mass of a mole of any substance?",
"Is molecular mass the mass of a mole of any substance?",
"What is the mass of a mole of any substance?"
],
[
"In the atoms of these element, electrons in the valence shell reorganise to acquire the stable @dash configuration"
],
[
"But, the @dash of sodium and chlorine atoms is different",
"Is , the state of sodium and chlorine atoms different?",
"Is , the state of sodium and chlorine atoms like?",
" What is different?"
],
[
"That means, loss, gain or sharing of @dash occurs",
"Does that means, loss, gain or sharing of electrons occurs?"
],
[
"Similarly a chlorine atom can attain the stable electronic configuration by receiving a single electron or by removing @dash electrons",
"Can a chlorine atom attain the stable electronic configuration by receiving a single electron or by removing seven electrons?",
" What can attain the stable electronic configuration by receiving a single electron or by removing seven electrons?"
],
[
"This electronic structure has been identified as a @dash electronic configuration",
"What has been identified as a stable electronic configuration?"
],
[
"The attractive forces or binding among the atoms or ions resulted by the rearrangement of electrons in the valence shell for stabilizing the atoms of elements as described above are called chemical @dash ",
"Are the attractive forces or binding among the atoms or ions resulted by the rearrangement of electrons in the valence shell for stabilizing the atoms of elements as described above called chemical bonds?",
"What are called chemical bonds?"
],
[
"How can the chemical bonds be divided?",
"According to the way the participating atoms behave when they chemically bind together, the chemical bonds can be divided into @dash types",
"Can according to the way the participating atoms behave when they chemically bind together, the chemical bonds can be divided into two types?"
],
[
"An ion is an atom or a group of atoms with an @dash charge",
"Is an ion an atom or a group of atoms with an electrical charge?",
"What is an atom or a group of atoms with an electrical charge?"
],
[
"Neutral atoms form positively charged ions by losing @dash ",
"Does neutral atoms form positively charged ions by losing electrons?",
"What positively charged ions by losing electrons?"
],
[
"Neutral atoms form negatively charged @dash by gaining electrons",
"Does neutral atoms form negatively charged ions by gaining electrons?",
"What negatively charged ions by gaining electrons?"
],
[
"Sodium is an element with @dash electronegativity",
"Is sodium an element with low electronegativity?",
"Is sodium an element with high electronegativity?"
],
[
"The bonds formed due to the strong electrostatic attractions between the positive and negative ions produced by the exchange of electrons among atoms are known as ionic bonds or @dash bonds",
"Are the bonds formed due to the strong electrostatic attractions between the positive and negative ions produced by the exchange of electrons among atoms known as ionic bonds or electrovalent bonds?",
"What are known as ionic bonds or electrovalent bonds?"
],
[
"So, sodium @dash is a compound with ionic bonds",
"Is so, sodium chloride a compound with ionic bonds?"
],
[
"Are such compounds called ionic compounds?",
"What are called ionic compounds?"
],
[
"Electron sharing between atoms is another method of forming @dash among them",
"What is another method of forming bonds among them?"
],
[
"By sharing of electrons like this, the atoms acquire the noble @dash configuration"
],
[
"Sharing of electrons between atoms of the same kind gives rise to homoatomic @dash ",
"Does sharing of electrons between atoms of the same kind gives rise to homoatomic molecules?"
"What gives rise to homoatomic molecules?"
],
[
"Joining of atoms by sharing electrons between a pair of atoms is referred to as a covalent @dash ",
"Is joining of atoms by sharing electrons between a pair of atoms referred to as a covalent bond?",
"What is referred to as a covalent bond?"
],
[
"Showing covalent bonds of a molecule representing the valence shell electrons of its atoms only by dots is called the @dash dot diagram",
"Is showing covalent bonds of a molecule representing the valence shell electrons of its atoms only by dots called the lew dot diagram?",
"What is called the Lewis dot diagram?"
],
[
"These electrons are represented by @dash ",
"Are these electrons represented by dots?",
"What are represented by dots?"
],
[
"The electrons represented by dots are known as lone @dash whereas electrons @dash represented by lines are called bond @dash ",
"Are the electrons represented by dots known as lone pairs whereas electrons pairs represented by lines called bond pairs?",
"What are known as lone pairs whereas electrons pairs represented by lines are called bond pairs?"
],
[
"The Lewis dot and cross diagram illurtrates how @dash exist in the covalent bond",
"Does the lewis dot and cross diagram illurtrates how electrons exist in the covalent bond?"
],
[
"The electrons of hydrogen atoms which form covalent bonds with carbon are symbolized by the @dash ",
"Are the electrons of hydrogen atoms which form covalent bonds with carbon are symbolized by the crosses?",
"What are symbolized by the crosses?"
],
[
"Those are known as the @dash in which the octet of electrons is complete",
"Are those known as the compounds in which the octet of electrons is complete?"
],
[
"The electronic configuration of a chlorine atom is 2, @dash , 7",
"Is the electronic configuration of a chlorine atom 2, 8, 7?",
"What is 2, 8, 7?"
],
[
" @dash chlorine atoms share three pairs of electrons with an aluminium atom to form an AlCl3 molecule",
"What share three pairs of electrons with an aluminium atom to form an AlCl3 molecule?"
],
[
"In this, the electronic configuration of the aluminium atom is 2, 8, @dash "
],
[
"Carbon naturally occurs as @dash forms of atomic lattices, graphite and diamond",
"Does carbon naturally occurs as two forms of atomic lattices, graphite and diamond?"
],
[
"They are known as the @dash forms of carbon",
"Are they known as the allotropic forms of carbon?"
],
[
"Generally the melting @dash and boiling @dash of covalent compounds are low",
"Are generally the melting points and boiling points of covalent compounds low?"
],
[
"These @dash forms differ in the way the carbon atoms form covalent bonds with one another",
"Does these two forms differ in the way the carbon atoms form covalent bonds with one another?",
"What differ in the way the carbon atoms form covalent bonds with one another?"
],
[
"Are such lattices in which the atoms covalently bonded known as atomic lattices?",
"Are such lattices in which the atoms covalently bonded known as conventional lattices?"
],
[
"These layers are superimposed on @dash other",
"Are these layers superimposed on one other?",
"What are superimposed on one other?"
],
[
" @dash is the hardest substance found in nature",
"Is diamond the hardest substance found in nature?"
],
[
"Thus @dash layer can easily slide over the other",
"Can thus one layer easily slide over the other?"
],
[
"Diamond is a three dimensional lattice in which every carbon atom forms @dash single bonds with @dash other carbon atoms",
"Is diamond a three dimensional lattice in which every carbon atom forms four single bonds with four other carbon atoms?"
],
[
"Are the forces holding these layers weak?",
"Are the forces holding these layers strong?",
"What are weak?"
],
[
"Graphite consists of layers of carbon atoms formed by the joining of one carbon atom with @dash other carbon atoms by single bonds"
],
[
"This is known as @dash "
],
[
"What is symmetrical?"
],
[
"Hence @dash is a compound with polar covalent bonds",
"Why is water is a compound with polar covalent bonds?",
"Is hence water a compound with polar covalent bonds?",
"What is a compound with polar covalent bonds?"
],
[
"Such bonds are called polar @dash bonds",
"Are such bonds called polar covalent bonds?",
"What are called polar covalent bonds?"
],
[
"Therefore, the hydrogen is a @dash – polar molecule",
"Why is the hydrogen is a non – polar molecule?",
"Is the hydrogen a non – polar molecule?",
" What is a non – polar molecule?"
],
[
"Thus the molecule is polarized so that the oxygen atom bears a partial negative @dash while the hydrogen atom carries a partial positive @dash ",
"Is thus the molecule polarized so that the oxygen atom bears a partial negative charge while the hydrogen atom carries a partial positive charge?"
],
[
"Of them @dash pairs are bond pairs and @dash pairs are lone pairs",
"What are bond pairs and two pairs are lone pairs?"
],
[
"It takes different values for different @dash ",
"Does it takes different values for different atoms?"
],
[
"Such covalent bonds are referred to as non – @dash covalent bonds",
"Are such covalent bonds referred to as non – polar covalent bonds?",
"What are referred to as non – polar covalent bonds?"
],
[
"So, the @dash distribution is not symmetrical",
"Is so, the electron dtribution not symmetrical?"
],
[
"Consequently the fluorine atom bears a small @dash charge",
"Does consequently the fluorine atom bears a small negative charge?"
],
[
"In water molecules the hydrogen atoms which bear a very small positive partial charge forms attractive forces with oxygen atoms bearing a very small @dash charge of the neighbouring water molecules",
"Does in water molecules the hydrogen atoms which bear a very small positive partial charge forms attractive forces with oxygen atoms bearing a very small negative charge of the neighbouring water molecules?",
"Does in water molecules the hydrogen atoms which bear a very big positive partial charge forms attractive forces with oxygen atoms bearing a very big negative charge of the neighbouring water molecules?",
"What molecules the hydrogen atoms which bear a very small positive partial charge forms attractive forces with oxygen atoms bearing a very small negative charge of the neighbouring water molecules?"
],
[
"This kind of attractions among the molecules are known as intermolecular @dash or intermolecular bonds",
"What are known as intermolecular forces or intermolecular bonds?"
],
[
"In case that there were no intermolecular forces among the water molecules, water is a gas at @dash temperature",
"Is in case that there were no intermolecular forces among the water molecules, water a gas at room temperature?"
],
[
"These intermolecular forces are not as strong as the covalent bonds between the oxygen atoms and the hydrogen atoms in a water @dash ",
"Are these intermolecular forces not as strong as the covalent bonds between the oxygen atoms and the hydrogen atoms in a water molecule?",
"What are not as strong as the covalent bonds between the oxygen atoms and the hydrogen atoms in a water molecule?"
],
[
"Yet, these intermolecular @dash impart many special properties to water",
"Does yet, these intermolecular forces impart many special properties to water?"
],
[
"Because of these intermolecular forces, @dash exists as a liquid at room temperature"
],
[
"Are such changes called physical changes?",
"What are called physical changes?"
],
[
"Such changes are known as @dash changes or @dash reactions",
"Are such changes known as chemical changes or chemical reactions?",
"What are known as chemical changes or chemical reactions?"
],
[
"When writing chemical equations in the standard form, reactants are written on the left @dash side and the products on the right @dash side",
"Are when writing chemical equations in the standard form, reactants written on the left hand side and the products on the right hand side?"
],
[
"For a @dash there may be several reactants as well as several products",
"Does for a reaction, there may be several reactants as well as several products?"
],
[
"chemical equation is the symbolic @dash of a chemical reaction using chemical formulae",
"Is chemical equation the symbolic representation of a chemical reaction using chemical formulae?",
"What is the symbolic representation of a chemical reaction using chemical formulae?"
],
[
"The formation of a new compound by the combination of elements with elements or elements with compounds or compounds with compounds is known as a chemical combination @dash ",
"Is the formation of a new compound by the combination of elements with elements or elements with compounds or compounds with compounds known as a chemical combination reaction?",
"What is known as a chemical combination reaction?"
],
[
"When burnt in air, metals like sodium and magnesium react with @dash to form their oxides"
],
[
" @dash does not react fast with the components in air",
"Does magnesium does not react fast with the components in air?"
],
[
"It is seen that the lustrous nature of the @dash of sodium metal diminishes ",
"Is it seen that the lustrous nature of the surface of sodium metal diminhes ?"
],
[
"Prolonged heating turns them into the @dash ",
"What turns them into the oxides?"
],
[
"The reason for the disappearance of the shiny nature of the piece of sodium is that it reacts fast with the @dash in air",
"Is the reason for the dappearance of the shiny nature of the piece of sodium that it reacts fast with the components in air?"
"What is that it reacts fast with the components in air?"
],
[
"It is seen that @dash metal does not react with dilute hydrochloric acid",
"Is it seen that copper metal does not react with dilute hydrochloric acid?"
],
[
"This shows that the rate of reactions of @dash s with dilute acids differ according to the type of the @dash "
],
[
"When hydrogen chloride gas is dissolved in water the solution is called hydrochloric @dash ",
"Is when hydrogen chloride gas dsolved in water the solution called hydrochloric acid?",
"What the solution is called hydrochloric acid?"
],
[
"Many metals react with dilute sulphuric acid also liberating hydrogen @dash ",
"Does many metals react with dilute sulphuric acid also liberating hydrogen gas?",
"What also liberating hydrogen gas?"
],
[
"HCl is called hydrogen @dash when it exits as a gas",
"Is hcl called hydrogen chloride when it exits as a gas?"
],
[
"The activity series is built up on the @dash of those observations as well as other data",
"Is the activity series built up on the bas of those observations as well as other data?",
"What is built up on the basis of those observations as well as other data?"
],
[
"Because of their high reactivity with air, they react with the @dash in air if they are kept exposed to air",
"Are because of their high reactivity with air, they react with the components in air if they are kept exposed to air?"
],
[
"The activity series is very important in the @dash in chemistry",
"Is the activity series very important in the studies in chemtry?",
"What is very important in the studies in chemistry?"
],
[
"Is the series obtained by the arrangement of metals in the descending order of their reactivity referred to as the activity series?",
"What is referred to as the activity series?"
],
[
"Activity series is useful to identify the @dash to be taken when storing metals",
"Is activity series useful to identify the precautions to be taken when storing metals?",
"What is useful to identify the precautions to be taken when storing metals?"
],
[
"Hence, strong extraction methods such as electrolysis are used to extract metals at the top of the @dash series",
"Why is strong extraction methods such as electrolysis are used to extract metals at the top of the activity series?",
"Are strong extraction methods such as electrolysis used to extract metals at the top of the activity series?",
"What are used to extract metals at the top of the activity series?"
],
[
"Activity series helps select @dash to make electrochemical cells",
"Does activity series helps select metals to make electrochemical cells?",
"What helps select metals to make electrochemical cells?"
],
[
"More simpler physical methods are used to extract metals at the bottom of the @dash series",
"Are more simpler physical methods used to extract metals at the bottom of the activity series?",
"What are used to extract metals at the bottom of the activity series?"
],
[
"Activity series can be used to decide on the methods suitable for extracting @dash ",
"Can activity series be used to decide on the methods suitable for extracting metals?",
"What can be used to decide on the methods suitable for extracting metals?"
],
[
"They are extracted by the physical @dash used to separate the mixtures",
"Are they extracted by the physical methods used to separate the mixtures?"
],
[
"How the metals occur differs according to their @dash ",
"Does how the metals occur differs according to their reactivity?"
],
[
" @dash is extracted from the iron ore mined from the soil",
"Is iron extracted from the iron ore mined from the soil?"
],
[
"The temperature range within the blast furnace is 1000 0C – @dash 0C",
"Is the temperature range within the blast furnace 1000 0c – 1900 0c?",
"What is 1000 0C – 1900 0C?"
],
[
"The structure used to extract iron is called the blast @dash ",
"Is the structure used to extract iron called the blast furnace?",
"What is called the blast furnace?"
],
[
"Heating caused by hot air brings about several reactions inside the furnace giving @dash iron",
"Does heating caused by hot air brings about several reactions inside the furnace giving liquid iron?",
"What brings about several reactions inside the furnace giving liquid iron?"
],
[
"It is a special furnace @dash high",
"Is it a special furnace about 60 m high?"
],
[
"certain amount of impurities can be removed by sifting the @dash containing gold",
"Can certain amount of impurities be removed by sifting the ore containing gold?",
"What can be removed by sifting the ore containing gold?"
],
[
"Is the metal separated by such physical methods purified further by various methods?",
"Is the metal separated by such mental methods purified further by various methods?",
"What is purified further by various methods?"
],
[
"But it is mixed with other @dash ",
"Why is it is mixed with other impurities?",
"Is it mixed with other impurities?",
" What is mixed with other impurities?"
],
[
"Is the density of gold very high?",
"Is the density of gold very low?",
"What is very high?"
],
[
"Therefore it occurs as the @dash metal in nature",
"Why is it occurs as the native metal in nature?",
"Does therefore it occurs as the native metal in nature?",
" What occurs as the native metal in nature?"
],
[
"This gas brought the temperature of the @dash ’s atmosphere to an optimal level for living organisms and it also acts a raw material for photosynthesis, the process that meets the food requirement of all living beings",
"What brought the temperature of the Earth’s atmosphere to an optimal level for living organisms and it also acts a raw material for photosynthesis, the process that meets the food requirement of all living beings?"
],
[
"Carbon dioxide is a gas that contributed to the advent of life on @dash ",
"Is carbon dioxide a gas that contributed to the advent of life on earth?",
"What is a gas that contributed to the advent of life on Earth?"
],
[
"The lime water turns milky because the white calcium @dash so formed is suspended in water",
"What turns milky because the white calcium carbonate so formed is suspended in water?"
],
[
"Therefore the milkiness of the solution @dash ",
"Why is the milkiness of the solution disappears?",
" What disappears?"
],
[
"When carbon dioxide is cooled strongly under high @dash the gas solidifies",
"Is when carbon dioxide cooled strongly under high pressure, the gas solidifies?"
],
[
"It is also used to create artificial @dash ",
"Is it also used to create artificial rains?"
],
[
"Therefore solid carbon dioxide is known as @dash ice",
"Why is solid carbon dioxide is known as dry ice?",
"Is therefore solid carbon dioxide known as dry ice?",
" What is known as dry ice?"
],
[
"Dry ice is largely used in @dash preservation",
"Is dry ice largely used in food preservation?",
"What is largely used in food preservation?"
],
[
"That is, @dash dioxide has acted as a catalyst for this reaction"
],
[
" @dash dioxide has increased the rate of this reaction",
"What has increased the rate of this reaction?"
],
[
"Since the mass of @dash dioxide remains the same, it has not been consumed during the reaction"
],
[
"The speed of evolution of gas bubbles is higher in the test tube with @dash dioxide",
"Is the speed of evolution of gas bubbles higher in the test tube with manganese dioxide?",
"What is higher in the test tube with manganese dioxide?"
],
]
# questionListTest = ["What are the two main categories of the environment ?",
# "What is the definition of matter ?",
# "What are the physical classifications of matter ?",
# "What are the chemical classifications of matter ? ",
# "What is the build unit of matter ? ",
# "What are the main subatomic particles ? ",
# "Who invented the atomic mode ? ",
# "What is the charge of nucleon ? ",
# "What is the charge of proton ? ",
# "What is the charge of electron ? ",
# "Why is atom electrically neutral ? ",
# "Who invented the planetary model ? ",
# "What is planetary model ? ",
# "Why don't the electrons fall on nucleons ? ",
# "What is neils bohr theory ? ",
# "What are energy levels ? ",
# "How many electrons are there in K,L,M,N energy levels ? ",
# "What is the definition of atomic number ? ",
# "What does the atomic number equal to of an element ? ",
# "Is the number of protons in different elements different ? ",
# "Is the atomic number a unique characteristic of that element ? ",
# "What are ions ? ",
# "Is the atomic number equal to the number of electrons ? ",
# "What are subatomic particle ? ",
# "Is the mass of protons and neutrons nearly equal ? ",
# "what does the mass of an atom depend on ? ",
# "What is mass number ? ",
# "What is the definition of electronic configuration ? ",
# "Who invented the periodic table ? ",
# "What does the periodic law state ? ",
# "What are groups and periods in a periodic table ? ",
# "What are valence electrons ? ",
# "Whats is an Isotope ? ",
# "What are the patterns found across the periodic table ? ",
# "Which group elements have minimum first ionization energy ? ",
# "Which group elements have maximum first ionization energy ? ",
# "What are the properties of metal ? ",
# "What are the chemical properties of metals ? ",
# "What type of metal is Sodium ? ",
# "where do we store sodium ? ",
# "In what form we can obtain Magnesium from sea ? ",
# "What is the main compound found in sea which contains Sodium ? ",] | [
"[email protected]"
] | |
1952f0c0642ab04555dee256c95b94ae4b6b275c | 3572a8576eb28b5ed9d06b27ecfc756de1e7a40a | /utils/handlers.py | aae4ba9595097f5b0de5f65e5b2499d3556dd3aa | [] | no_license | waytobehigh/classification-labeling-tool | a18488f47e01e5f636a278a67e181a76945a5c1f | 2095a247dd07359377cafe91399ac1a8ce6a9339 | refs/heads/master | 2022-12-07T19:00:55.937426 | 2020-08-17T19:32:04 | 2020-08-17T19:32:04 | 285,910,362 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | import streamlit
from PIL import Image
def handle_image(path: str, width: int = 225, height: int = 225):
    """Load an image from *path*, resize it, and render it with Streamlit.

    Args:
        path: Filesystem path to the image file.
        width: Target width in pixels (defaults to 225).
        height: Target height in pixels (defaults to 225).
    """
    # Image.open is lazy and keeps the underlying file handle open; use a
    # context manager so it is closed deterministically. resize() returns a
    # new, independent image, so closing the source afterwards is safe.
    with Image.open(path) as image:
        resized = image.resize((width, height))
    streamlit.image(resized)
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.