blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
af7acd4198ed01f248b585d74da066970f4bb5f5 | c0960d3ebef9b4167c7f7581c50adcfb66cc02fa | /app/importer/handlers.py | d75f7a5ec82761a43f049d0ba25491c38ec31d2b | [] | no_license | hebkhan/server | 8358c3970322383473e4d7165447ccc800d864be | 3150d9316489da638a19470085ca97061c3592ee | refs/heads/master | 2021-06-13T15:27:01.571952 | 2015-08-03T22:09:43 | 2015-08-03T22:09:43 | 13,910,197 | 1 | 2 | null | 2016-02-29T21:23:07 | 2013-10-27T20:57:13 | Python | UTF-8 | Python | false | false | 7,296 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, with_statement
import json
from collections import defaultdict
import copy
from google.appengine.api import users
from google.appengine.ext import db
from request_handler import RequestHandler
from user_util import dev_server_only
from models import UserData, UserVideo, VideoLog, UserExercise, ProblemLog
from goals.models import Goal
from api.jsonify import jsonify
from api.auth.tests.test import TestOAuthClient
from oauth_provider.oauth import OAuthToken
try:
    import secrets, secrets_dev
except ImportError:
    # Dev environments may lack secrets.py / secrets_dev.py; fall back to an
    # empty placeholder class so this module can still be imported. Only
    # ImportError is caught so real bugs inside those modules are not hidden.
    class secrets(object):
        pass
    secrets_dev = secrets
class ImportHandler(RequestHandler):
    """Import data for a particular user to the dev datastore from production.

    Existing data will be overwritten. Please never use this in production!

    Also, don't rely on everything working. Some fields aren't exposed by the
    API, and this simply reads the API. Improvements welcome! :)

    To use this, you need to ensure that secrets.py contains ka_api_consumer_key
    and ka_api_consumer_secret. Also, you need to put your access token in
    secrets_dev.py as ka_api_token_key and ka_api_token_secret. See setup_oauth
    for details.
    """
    access_token = ""
    client = None
    email = ""

    # Default cap on how many entities of each kind are imported; query-string
    # parameters with the same names override these values. (A duplicate
    # 'UserExercise' key was removed -- dict literals silently drop dupes.)
    _default_kinds = {
        'UserVideo': 1,
        'VideoLog': 1000,
        'UserExercise': 1,
        'ProblemLog': 1,
        'Goal': 1000,
    }

    @dev_server_only
    def get(self):
        """Fetch the user's data from the production API and store it locally."""
        if not hasattr(secrets, 'ka_api_consumer_key') or \
           not hasattr(secrets, 'ka_api_consumer_secret') or \
           not hasattr(secrets_dev, 'ka_api_token_key') or \
           not hasattr(secrets_dev, 'ka_api_token_secret'):
            return self.redirect("/")

        self.setup_oauth()

        self.email = self.request_string("email")
        if not self.email:
            # Raising a bare string is a TypeError on modern Pythons; raise a
            # real exception type instead.
            raise ValueError("Must supply email for user to import")

        params = copy.copy(self._default_kinds)
        # Query-string overrides arrive as strings, but the values are used as
        # slice bounds below, so coerce them to int where possible.
        for kind, limit in self.request.params.items():
            try:
                params[kind] = int(limit)
            except (TypeError, ValueError):
                params[kind] = limit

        # get proper user from addition 1 userexercise
        user_id_json = json.loads(self.api("/api/v1/user/exercises/addition_1"))
        user = users.User(user_id_json['user'])

        # UserData
        user_data_json_raw = self.api("/api/v1/user")
        user_data = UserData.from_json(json.loads(user_data_json_raw), user=user)
        self.output('user_data', user_data, user_data_json_raw)
        user_data.put()

        if 'UserVideo' in params:
            user_videos_json = json.loads(self.api("/api/v1/user/videos"))
            user_videos = []
            for user_video_json in user_videos_json[:params['UserVideo']]:
                user_video = UserVideo.from_json(user_video_json, user_data=user_data)
                user_videos.append(user_video)
                self.output('user_video', user_video, jsonify(user_video_json))

            video_logs = defaultdict(list)
            if 'VideoLog' in params:
                for user_video in user_videos:
                    ytid = user_video.video.youtube_id
                    video_logs_json = json.loads(
                        self.api("/api/v1/user/videos/%s/log" % ytid))
                    # Fixed: this used to slice by params['ProblemLog'], which
                    # wrongly capped video logs at the problem-log limit.
                    for video_log_json in video_logs_json[:params['VideoLog']]:
                        video_log = VideoLog.from_json(video_log_json, user_video.video, user)
                        video_logs[user_video].append(video_log)
                        self.output("video_log", video_log, jsonify(video_log_json))

            # delete old video logs
            query = VideoLog.all(keys_only=True)
            query.filter('user =', user)
            db.delete(query.fetch(10000))

            db.put(user_videos)
            for logs in video_logs.values():
                db.put(logs)

        if 'UserExercise' in params:
            user_exercises_json = json.loads(self.api("/api/v1/user/exercises"))
            user_exercises = []
            for user_exercise_json in user_exercises_json[:params['UserExercise']]:
                user_exercise = UserExercise.from_json(user_exercise_json, user_data)
                # from_json may return None for exercises it can't resolve.
                if user_exercise:
                    user_exercises.append(user_exercise)
                    self.output("user_exercise", user_exercise, jsonify(user_exercise_json))

            problem_logs = defaultdict(list)
            if 'ProblemLog' in params:
                for user_exercise in user_exercises:
                    problem_logs_json = json.loads(self.api(
                        "/api/v1/user/exercises/%s/log" % user_exercise.exercise))
                    for problem_log_json in problem_logs_json[:params['ProblemLog']]:
                        problem_log = ProblemLog.from_json(problem_log_json,
                            user_data=user_data,
                            exercise=user_exercise.exercise_model)
                        problem_logs[user_exercise].append(problem_log)
                        self.output("problem_log", problem_log,
                            jsonify(problem_log_json))

            db.put(user_exercises)
            for logs in problem_logs.values():
                db.put(logs)

        if 'Goal' in params:
            # Goals carry timestamps from production; disable auto_now so the
            # imported values aren't clobbered on put().
            with AutoNowDisabled(Goal):
                goals_json = json.loads(self.api("/api/v1/user/goals"))
                goals = []
                for goal_json in goals_json[:params['Goal']]:
                    goal = Goal.from_json(goal_json, user_data=user_data)
                    goals.append(goal)
                    self.output("goal", goal, jsonify(goal_json))
                db.put(goals)

                # need to tell the userdata that it has goals
                user_data.has_current_goals = not all([g.completed for g in goals])
                user_data.put()

    def output(self, name, obj, json_raw):
        """Write a labelled section with the raw API JSON and the re-serialized
        local entity, for eyeballing round-trip fidelity."""
        self.response.write("//--- %s \n" % name)
        self.response.write(json_raw)
        self.response.write("\n")
        self.response.write(jsonify(obj))
        self.response.write("\n\n")

    def setup_oauth(self, url="http://www.khanacademy.org"):
        """Build the OAuth client and exchange the stored token for access."""
        self.client = TestOAuthClient(url, secrets.ka_api_consumer_key,
            secrets.ka_api_consumer_secret)
        request_token = OAuthToken(secrets_dev.ka_api_token_key,
            secrets_dev.ka_api_token_secret)
        self.access_token = self.client.fetch_access_token(request_token)

    def api(self, url, email=""):
        """GET *url* from production on behalf of *email* (defaults to the
        email supplied to this request). Returns the raw response body."""
        email = email or self.email
        if email:
            url += "?email=%s" % email
        return self.client.access_resource(url, self.access_token)
class AutoNowDisabled(object):
    '''ContextManager that temporarily disables auto_now on properties like
    DateTimeProperty. This is useful for importing entities to different
    datastores.'''

    def __init__(self, klass):
        # klass: a db.Model subclass whose properties() will be toggled.
        self.klass = klass

    def __enter__(self):
        # Remember each property's current auto_now flag so it can be
        # restored on exit. (Uses .items() for Python 2/3 compatibility.)
        self.existing = {}
        for name, prop in self.klass.properties().items():
            if hasattr(prop, 'auto_now'):
                self.existing[prop] = prop.auto_now
                prop.auto_now = False
        return self.klass

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the saved flags even if the body raised. The loop variable
        # no longer shadows the exception's `value` argument.
        for prop, saved in self.existing.items():
            prop.auto_now = saved
| [
"[email protected]"
] | |
ad38dcb07d878c84f5b81bd9944ce5bddbff6853 | 6548cb72a1ec9fad2835be2d039c9893d4cc7f0c | /CodeForces/HelpfulMaths/HelpfulMaths.py | 2cd48d0d50ecbe26935ae7c126b60e7adaa53e28 | [
"MIT"
def sorted_sum(expression):
    """Return the summands of *expression* rearranged in ascending order.

    The input is a '+'-separated sum of the digits 1, 2 and 3 (e.g. "3+2+1");
    any other character is ignored, matching the original counting behaviour
    (which tallied '1', '2' and '3' occurrences into ones/twos/thirds --
    "thrids" typo fixed).
    """
    return '+'.join(sorted(ch for ch in expression if ch in '123'))


def main():
    # Read one expression from stdin and print its reordered form.
    print(sorted_sum(input()))


if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
c775dfbdd9e3ff016859c95b19ca5cf1516715e8 | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/draft_schematic/scout/shared_item_trap_flash_bomb.py | 75d25515e7c8511a1bb926e66454877f7bd0ff4e | [
"MIT"
] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 455 | py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Intangible draft-schematic object for the scout flash-bomb trap.

    *kernel* is the SWGANH service kernel (unused here). This file is
    auto-generated; custom code must live between the MODIFICATIONS markers
    or it will be lost on regeneration.
    """
    result = Intangible()
    result.template = "object/draft_schematic/scout/shared_item_trap_flash_bomb.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result | [
"[email protected]"
] | |
a715b8049176e74e25d7d6d5ee692ff8b226db1e | 5a33ee1df4e82bb7f6a49c66898bea0d79fc91af | /API/views.py | 1d0c9637992799fe8d327e5c8d79ccefb8863bb0 | [] | no_license | Yousef115/django_event_planner | 4ddfbadd4f216734930c0b3b77ab80d5f467131a | cd4479e29e0a3574c39eca967e3e1523bb6eebe9 | refs/heads/master | 2020-07-22T14:25:53.730673 | 2019-09-12T13:20:32 | 2019-09-12T13:20:32 | 207,232,605 | 0 | 0 | null | 2019-09-09T05:43:03 | 2019-09-09T05:43:02 | null | UTF-8 | Python | false | false | 3,100 | py | from datetime import datetime
from django.contrib.auth.models import User
from rest_framework.generics import ListAPIView, RetrieveAPIView, CreateAPIView, DestroyAPIView, RetrieveUpdateAPIView
from rest_framework.filters import SearchFilter
from rest_framework.permissions import IsAuthenticated
from events.models import Booking, Event, Profile
from .permissions import IsOwner
from .serializers import (EventsListSerializer, EventDetailsSerializer, CreateEventSerializer,
BookEventSerializer, BookingDetailsSerializer, UserSerializer,
RegisterSerializer, EventOwnerDetailSerializer)
#OK
class EventsList(ListAPIView):
    """List upcoming events; supports ``?search=<owner username>``."""
    queryset = Event.objects.all()
    serializer_class = EventsListSerializer
    filter_backends = [SearchFilter,]
    search_fields = ['owner__username']

    def get_queryset(self):
        # Only events dated today or later are listed.
        # NOTE(review): datetime.today() is naive -- confirm this matches the
        # project's USE_TZ configuration.
        today = datetime.today()
        return Event.objects.filter(date__gte=today)
#OK
class EventDetails(RetrieveAPIView):
    """Retrieve a single event by ``event_id``.

    The event's owner receives the richer owner serializer; every other
    requester gets the public details serializer.
    """
    queryset = Event.objects.all()
    lookup_field = 'id'
    lookup_url_kwarg = 'event_id'

    def get_serializer_class(self):
        # (Removed a leftover debug print of the event id.)
        event = Event.objects.get(id=self.kwargs['event_id'])
        if self.request.user == event.owner:
            return EventOwnerDetailSerializer
        return EventDetailsSerializer
#OK
class CreateEvent(CreateAPIView):
    """Create an event owned by the authenticated requester."""
    serializer_class = CreateEventSerializer
    permission_classes = [IsAuthenticated]

    def perform_create(self, serializer):
        # Stamp ownership and creation time server-side rather than trusting
        # anything in the request payload.
        serializer.save(owner=self.request.user, created_on=datetime.today())
#OK
class ModifyEvent(RetrieveUpdateAPIView):
    """Retrieve or update an event; only its authenticated owner may do so."""
    queryset = Event.objects.all()
    serializer_class = CreateEventSerializer
    lookup_field = 'id'
    lookup_url_kwarg = 'event_id'
    permission_classes = [IsAuthenticated, IsOwner]
class BookingsList(ListAPIView):
    """List the authenticated user's own bookings."""
    serializer_class = BookingDetailsSerializer
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        # Only the requester's bookings. (An unused `today` local and a dead
        # commented-out class queryset were removed.)
        return Booking.objects.filter(owner=self.request.user)
#OK
class BookEvent(CreateAPIView):
    """Book the event identified by ``event_id`` for the authenticated user."""
    serializer_class = BookEventSerializer
    permission_classes = [IsAuthenticated]

    def perform_create(self, serializer):
        # (Removed a leftover debug print of the requesting user.)
        serializer.save(owner=self.request.user, event_id=self.kwargs['event_id'])
class ModifyBooking(RetrieveUpdateAPIView):
    """Retrieve or update a booking by ``booking_id``.

    NOTE(review): unlike ModifyEvent, no permission_classes are declared, so
    any client can modify any booking -- confirm whether that is intended.
    """
    queryset = Booking.objects.all()
    serializer_class = BookEventSerializer
    lookup_field = 'id'
    lookup_url_kwarg = 'booking_id'
class Profile(RetrieveAPIView):
    """Return the authenticated requester's own user record.

    NOTE(review): this view shadows the ``Profile`` model imported from
    events.models at the top of this file -- consider renaming one of them.
    """
    serializer_class = UserSerializer

    def get_object(self):
        # The object is always the requesting user; no pk lookup is needed.
        return self.request.user
class Register(CreateAPIView):
    """Open endpoint that creates a new user account."""
    serializer_class = RegisterSerializer
# class Follow(RetrieveUpdateAPIView):
# queryset = User.objects.all()
# serializer_class = CreateEventSerializer
# lookup_field = 'id'
# lookup_url_kwarg = 'user_id'
# permission_classes = [IsAuthenticated,]
# class following(CreateAPIView):
# serializer_class = FollowSerializer
# permission_classes = [IsAuthenticated]
# def perform_create(self, serializer):
# print (self.request.user)
# serializer.save(owner=self.request.user, event_id=self.kwargs['event_id'])
| [
"[email protected]"
] | |
b1056639836d47b357a4e4a0c419906aac5c4a3d | 0b3b56a51ae32269ac41e47a0db38ed8c429e936 | /1. Programación Orientada a Objetos/Ensayo examen/Obrero.py | fb8d8c7cb016eb20b77d07f47eb3eb0e5d5167e2 | [] | no_license | asunawesker/EEParadigmasDeProgramacion | 1e2030beca85aa64b8923e63a5f9ec59a624d1ee | be05028923fdb9459c1bf849160b62280479e8b1 | refs/heads/main | 2023-05-12T03:43:56.758563 | 2021-06-02T16:07:51 | 2021-06-02T16:07:51 | 373,226,197 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | from Empleado import *
class Obrero(Empleado):
    """Worker paid a base salary plus a bonus for perfect monthly attendance."""

    def __init__(self, asistencia = 0, sueldo = 10000, bono = 600):
        # asistencia: days attended this month; sueldo: base salary;
        # bono: bonus paid only for perfect attendance.
        self.asistencia = asistencia
        self.sueldo = sueldo
        self.bono = bono

    def _asistencia_completa(self, mes):
        """Return True when attendance equals the day count of month *mes*.

        Shared by bonoMensual and sueldoTotal, which previously duplicated
        these month chains (including a redundant repeated ``mes == 1`` term).
        February accepts 28 or 29 to cover leap years.
        """
        if mes == 2:
            return self.asistencia == 28 or self.asistencia == 29
        if mes in (4, 6, 9, 11):
            return self.asistencia == 30
        if mes in (1, 3, 5, 7, 8, 10, 12):
            return self.asistencia == 31
        return False

    def checarAsistencia(self):
        # NOTE(review): returns asistencia + 1 without updating
        # self.asistencia -- the caller apparently must assign it; confirm.
        return self.asistencia + 1

    def bonoMensual(self, mes):
        """Return 'SI' if the bonus is earned for month *mes*, else 'NO'."""
        return 'SI' if self._asistencia_completa(mes) else 'NO'

    def sueldoTotal(self, mes):
        """Return base salary, plus the bonus when attendance was perfect."""
        if self._asistencia_completa(mes):
            return self.sueldo + self.bono
        return self.sueldo
"[email protected]"
] | |
8c7f2a5dd1a987c2491baa94b1076c12faadad88 | 02626713a8b08b6441a44544b5d512c5c4b5b7c8 | /wsgi.py | 97b205cd3ddec5c7ae649a3a04fa02d0301f8147 | [] | no_license | chitrankdixit/freshdesk-flyrobe-app | 56240f0f81e57606fb5db09ab5e61438958bb66d | d4adb310a052a5505a250b9c7934ca7ae3205831 | refs/heads/master | 2022-12-14T20:52:31.738845 | 2018-02-05T12:55:47 | 2018-02-05T12:55:47 | 119,705,056 | 0 | 1 | null | 2021-06-01T21:48:42 | 2018-01-31T15:26:10 | Python | UTF-8 | Python | false | false | 71 | py | from freshdesk_app import app
# Run the Flask development server only when this module is executed
# directly; production WSGI servers import `app` from this module instead.
if __name__ == "__main__":
    app.run()
"[email protected]"
] | |
a218b140548d16333d767b2b2c4efa1b295e04a9 | f662bd04d2f29ef25bbfd7e768b1e57dfbba4d9f | /apps/evaluacion/migrations/0001_initial.py | a7633e204c144016e727cc9cc12c44548fc6d301 | [] | no_license | DARKDEYMON/sisevadoc | f59b193688f7eca7c140a03ee414f5d20ada78c7 | 9fc0943200986824a2aab2134fdba5c9f3315798 | refs/heads/master | 2020-03-19T03:27:07.907125 | 2019-12-11T13:30:43 | 2019-12-11T13:30:43 | 135,729,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,204 | py | # Generated by Django 2.0.8 on 2018-10-08 14:13
import apps.evaluacion.models
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import smart_selects.db_fields
class Migration(migrations.Migration):
    """Auto-generated initial schema for the ``evaluacion`` app.

    Creates the questionnaire models (student, self-evaluation, career
    director), the per-role access tokens, the evaluation commission and the
    ``evaluacion`` table itself. Do not hand-edit the operations; add a new
    migration for any schema change.
    """

    # First migration of this app.
    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('academico', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='comision',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('apellidos', models.CharField(max_length=50)),
                ('nombres', models.CharField(max_length=50)),
                ('ci', models.CharField(max_length=10)),
                ('veedor', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='cuestionario_aevaluacion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pregunta_1', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente cumple con el horario de clases establecido?')),
                ('pregunta_2', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente presenta el Plan de asignatura a los estudiantes al inicio de la actividad académica?')),
                ('pregunta_3', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente concluye los contenidos temáticos y/o actividades propuestos en el Plan de asignatura?')),
                ('pregunta_4', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente promueve el análisis crítico, la creatividad y el aprendizaje independiente en los estudiantes?')),
                ('pregunta_5', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente se relaciona con los estudiantes sin discriminación de raza, género, clase social, posición económica, credo religioso o ideología-política.?')),
                ('pregunta_6', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente motiva a los alumnos para la participación activa del mismo en clases?')),
                ('pregunta_7', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente presta atención individual o colectiva a los alumnos que solicitan consulta fuera de horarios de clases?')),
                ('pregunta_8', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente utiliza medios educativos de apoyo al aprendizaje, tales como Pizarra, retroproyector u otros?')),
                ('pregunta_9', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente facilita texto guía, apuntes, guías de ejercicios prácticos u otros materiales preparados por él?')),
                ('pregunta_10', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente proporciona la Bibliografía básica contenida en el Plan de asignatura?')),
                ('pregunta_11', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente valora los conocimientos previos de los estudiantes, respecto a las asignaturas precedentes?')),
                ('pregunta_12', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente implementa procedimientos adecuados para la evaluación del aprendizaje?')),
                ('pregunta_13', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente elabora las preguntas de examen en correspondencia con los temas avanzados en la asignatura?')),
                ('pregunta_14', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente asigna al estudiante el tiempo suficiente y necesario para la resolución del examen aplicado?')),
                ('pregunta_15', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente informa a los estudiantes sobre los resultados y debilidades identificadas en la evaluación, en un plazo no mayor a los 10 días?')),
                ('pregunta_16', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente domina los temas de la asignatura?')),
                ('pregunta_17', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente aclara las preguntas efectuadas por los alumnos sobre los temas avanzados?')),
                ('pregunta_18', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente tiene claridad expositiva en el desarrollo de los temas?')),
                ('pregunta_19', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente desarrolla los temas en una secuencia lógica?')),
                ('creacion', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='cuestionario_alumno',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pregunta_1', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente cumple con el horario de clases establecido?')),
                ('pregunta_2', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente presenta el Plan de asignatura a los estudiantes al inicio de la actividad académica?')),
                ('pregunta_3', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente concluye los contenidos temáticos y/o actividades propuestos en el Plan de asignatura?')),
                ('pregunta_4', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente promueve el análisis crítico, la creatividad y el aprendizaje independiente en los estudiantes?')),
                ('pregunta_5', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente se relaciona con los estudiantes sin discriminación de raza, género, clase social, posición económica, credo religioso o ideología-política.?')),
                ('pregunta_6', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente motiva a los alumnos para la participación activa del mismo en clases?')),
                ('pregunta_7', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente presta atención individual o colectiva a los alumnos que solicitan consulta fuera de horarios de clases?')),
                ('pregunta_8', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente utiliza medios educativos de apoyo al aprendizaje, tales como Pizarra, retroproyector u otros?')),
                ('pregunta_9', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente facilita texto guía, apuntes, guías de ejercicios prácticos u otros materiales preparados por él?')),
                ('pregunta_10', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente proporciona la Bibliografía básica contenida en el Plan de asignatura?')),
                ('pregunta_11', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente valora los conocimientos previos de los estudiantes, respecto a las asignaturas precedentes?')),
                ('pregunta_12', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente implementa procedimientos adecuados para la evaluación del aprendizaje?')),
                ('pregunta_13', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente elabora las preguntas de examen en correspondencia con los temas avanzados en la asignatura?')),
                ('pregunta_14', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente asigna al estudiante el tiempo suficiente y necesario para la resolución del examen aplicado?')),
                ('pregunta_15', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente informa a los estudiantes sobre los resultados y debilidades identificadas en la evaluación, en un plazo no mayor a los 10 días?')),
                ('pregunta_16', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente domina los temas de la asignatura?')),
                ('pregunta_17', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente aclara las preguntas efectuadas por los alumnos sobre los temas avanzados?')),
                ('pregunta_18', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente tiene claridad expositiva en el desarrollo de los temas?')),
                ('pregunta_19', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente desarrolla los temas en una secuencia lógica?')),
                ('creacion', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='cuestionario_dcarrera',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pregunta_1', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente cumple con el horario de clases establecido?')),
                ('pregunta_2', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente presenta el Plan de asignatura a los estudiantes al inicio de la actividad académica?')),
                ('pregunta_3', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente concluye los contenidos temáticos y/o actividades propuestos en el Plan de asignatura?')),
                ('pregunta_4', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente publica y entrega las calificaciones, en un plazo no mayor a los 10 días?')),
                ('pregunta_5', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente participa en forma efectiva en tutorías y tribunal de graduación?')),
                ('pregunta_6', models.PositiveIntegerField(choices=[(1, 'Nunca'), (2, 'Casi Nunca'), (3, 'A Veces'), (4, 'Casi Siempre'), (5, 'Siempre')], verbose_name='¿El docente cumple en las tareas asignadas por el Director de Carrera?')),
                ('creacion', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='evaluacion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gestion', models.PositiveIntegerField(default=2018, validators=[django.core.validators.RegexValidator(code='dato solo numerico', message='El año solo contiene cuatro digitos numericos', regex='^[0-9]{4}$')])),
                ('numero_alumnos', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1)])),
                ('creacion', models.DateTimeField(auto_now=True)),
                ('estado', models.BooleanField(default=True)),
                ('observaciones', models.TextField(blank=True, null=True)),
                ('carrera', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='academico.carreras')),
                ('docente', smart_selects.db_fields.ChainedForeignKey(auto_choose=True, chained_field='carrera', chained_model_field='carrera', on_delete=django.db.models.deletion.CASCADE, to='academico.docentes')),
                ('materia', smart_selects.db_fields.ChainedForeignKey(auto_choose=True, chained_field='carrera', chained_model_field='carrera', on_delete=django.db.models.deletion.CASCADE, to='academico.materias')),
            ],
        ),
        migrations.CreateModel(
            name='token_aevaluacion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('numero_ran', models.BigIntegerField(default=apps.evaluacion.models.random_token)),
                ('usado', models.BooleanField(default=False)),
                ('creacion', models.DateTimeField(auto_now=True)),
                ('evaluacion', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='evaluacion.evaluacion')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='token_alumno',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('numero_ran', models.BigIntegerField(default=apps.evaluacion.models.random_token)),
                ('usado', models.BooleanField(default=False)),
                ('creacion', models.DateTimeField(auto_now=True)),
                ('evaluacion', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='evaluacion.evaluacion')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='token_dcarrera',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('numero_ran', models.BigIntegerField(default=apps.evaluacion.models.random_token)),
                ('usado', models.BooleanField(default=False)),
                ('creacion', models.DateTimeField(auto_now=True)),
                ('evaluacion', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='evaluacion.evaluacion')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Evaluation links are added after all models exist to avoid forward
        # references among the CreateModel operations above.
        migrations.AddField(
            model_name='cuestionario_dcarrera',
            name='evaluacion',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='evaluacion.evaluacion'),
        ),
        migrations.AddField(
            model_name='cuestionario_alumno',
            name='evaluacion',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='evaluacion.evaluacion'),
        ),
        migrations.AddField(
            model_name='cuestionario_aevaluacion',
            name='evaluacion',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='evaluacion.evaluacion'),
        ),
        migrations.AddField(
            model_name='comision',
            name='evaluacion',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='evaluacion.evaluacion'),
        ),
        # One evaluation per (docente, gestion, materia) triple.
        migrations.AlterUniqueTogether(
            name='evaluacion',
            unique_together={('docente', 'gestion', 'materia')},
        ),
    ]
| [
"[email protected]"
] | |
cd46ec9a9ecfc69e26b384cdea6a8d7002647494 | bd90c21f553362afbedb8ca629b14e8fa7b21dae | /tests/unit/python/foglamp/services/common/test_avahi.py | 9a4e7f3dd7ff03369e03a55f29d24fa3f58090ca | [
"Apache-2.0"
] | permissive | christoofar/FogLAMP | 631116d7e251c594a61765af46da8d38e77fe0e5 | 3aaae302104038a8534c54ff8a3ed0fefd4f3201 | refs/heads/develop | 2020-04-12T16:54:56.141584 | 2018-12-20T10:17:10 | 2018-12-20T10:17:10 | 162,627,944 | 0 | 0 | Apache-2.0 | 2018-12-20T20:30:37 | 2018-12-20T20:25:57 | Python | UTF-8 | Python | false | false | 1,717 | py | # -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import pytest
import foglamp.services.common.avahi as avahi
def test_byte_array_to_string():
    """byte_array_to_string decodes printable byte values to ASCII characters."""
    array = [104, 101, 108, 108, 111]
    # The original bound the result to 'str', shadowing the builtin; use a
    # descriptive local name instead.
    result = avahi.byte_array_to_string(array)
    assert result == 'hello'
def test_byte_array_to_string_unprintable():
    """Unprintable byte values (here 12) are rendered as '.'."""
    array = [104, 101, 108, 108, 111, 12]
    # The original bound the result to 'str', shadowing the builtin; use a
    # descriptive local name instead.
    result = avahi.byte_array_to_string(array)
    assert result == 'hello.'
def test_txt_array_string_array():
    """txt_array_to_string_array decodes every byte array in the list."""
    raw = [104, 101, 108, 108, 111]
    strs = avahi.txt_array_to_string_array([list(raw), list(raw)])
    for decoded in (strs[0], strs[1]):
        assert decoded == 'hello'
def test_string_to_byte_array():
    """string_to_byte_array encodes 'hello' to its ASCII byte values."""
    array = avahi.string_to_byte_array('hello')
    for index, expected in enumerate([104, 101, 108, 108, 111]):
        assert array[index] == expected
def test_string_array_to_txt_array():
    """string_array_to_txt_array encodes each string into an ASCII byte array."""
    expected = [104, 101, 108, 108, 111]
    arrays = avahi.string_array_to_txt_array(['hello', 'hello'])
    for array in (arrays[0], arrays[1]):
        for index, byte in enumerate(expected):
            assert array[index] == byte
def test_dict_to_txt_array():
    """dict_to_txt_array encodes each key=value pair as an ASCII byte array."""
    # The original named this 'dict', shadowing the builtin; use a
    # descriptive name instead.
    txt_records = {"hello": "world"}
    arrays = avahi.dict_to_txt_array(txt_records)
    array = arrays[0]
    # ASCII for "hello=world" (61 is '=').
    expected = [104, 101, 108, 108, 111, 61, 119, 111, 114, 108, 100]
    for index, byte in enumerate(expected):
        assert array[index] == byte
| [
"[email protected]"
] | |
1ab76ebe4adb113218b9167f0a2548b32f27827f | 784e1867eafdfa0842ed4776b4740154de7200ba | /flash/lcd.py | eead9c357bb803b5890935910fe8c34aa4d1b15c | [
"MIT"
] | permissive | sleepychild/diy_sha_sep2019 | 080fb4c4dd5e2af9aa63f946f298f144dd701d8b | 2fdf9ec548e725e48013e6d84311da01a1b7521b | refs/heads/master | 2020-08-10T18:54:12.490761 | 2019-10-18T18:25:09 | 2019-10-18T18:25:09 | 214,400,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,747 | py | from time import sleep_ms, sleep_us
from machine import I2C, Pin
class LcdApi:
    """Implements the API for talking with HD44780 compatible character LCDs.
    This class only knows what commands to send to the LCD, and not how to get
    them to the LCD.
    It is expected that a derived class will implement the hal_xxx functions.
    """
    # The following constant names were lifted from the avrlib lcd.h
    # header file, however, I changed the definitions from bit numbers
    # to bit masks.
    #
    # HD44780 LCD controller command set
    LCD_CLR = 0x01 # DB0: clear display
    LCD_HOME = 0x02 # DB1: return to home position
    LCD_ENTRY_MODE = 0x04 # DB2: set entry mode
    LCD_ENTRY_INC = 0x02 # --DB1: increment
    LCD_ENTRY_SHIFT = 0x01 # --DB0: shift
    LCD_ON_CTRL = 0x08 # DB3: turn lcd/cursor on
    LCD_ON_DISPLAY = 0x04 # --DB2: turn display on
    LCD_ON_CURSOR = 0x02 # --DB1: turn cursor on
    LCD_ON_BLINK = 0x01 # --DB0: blinking cursor
    LCD_MOVE = 0x10 # DB4: move cursor/display
    LCD_MOVE_DISP = 0x08 # --DB3: move display (0-> move cursor)
    LCD_MOVE_RIGHT = 0x04 # --DB2: move right (0-> left)
    LCD_FUNCTION = 0x20 # DB5: function set
    LCD_FUNCTION_8BIT = 0x10 # --DB4: set 8BIT mode (0->4BIT mode)
    LCD_FUNCTION_2LINES = 0x08 # --DB3: two lines (0->one line)
    LCD_FUNCTION_10DOTS = 0x04 # --DB2: 5x10 font (0->5x7 font)
    LCD_FUNCTION_RESET = 0x30 # See "Initializing by Instruction" section
    LCD_CGRAM = 0x40 # DB6: set CG RAM address
    LCD_DDRAM = 0x80 # DB7: set DD RAM address
    LCD_RS_CMD = 0
    LCD_RS_DATA = 1
    LCD_RW_WRITE = 0
    LCD_RW_READ = 1
    def __init__(self, num_lines, num_columns):
        """Initialise the display state and put the controller into a known
        mode (display off, backlight on, cleared, auto-increment entry)."""
        # Clamp the requested geometry to the HD44780 maximum of 4 x 40.
        self.num_lines = num_lines
        if self.num_lines > 4:
            self.num_lines = 4
        self.num_columns = num_columns
        if self.num_columns > 40:
            self.num_columns = 40
        # Software cursor position; the hardware cursor is kept in sync
        # through move_to().
        self.cursor_x = 0
        self.cursor_y = 0
        self.backlight = True
        self.display_off()
        self.backlight_on()
        self.clear()
        self.hal_write_command(self.LCD_ENTRY_MODE | self.LCD_ENTRY_INC)
        self.hide_cursor()
        self.display_on()
    def clear(self):
        """Clears the LCD display and moves the cursor to the top left
        corner.
        """
        self.hal_write_command(self.LCD_CLR)
        self.hal_write_command(self.LCD_HOME)
        self.cursor_x = 0
        self.cursor_y = 0
    def show_cursor(self):
        """Causes the cursor to be made visible."""
        self.hal_write_command(self.LCD_ON_CTRL | self.LCD_ON_DISPLAY |
                               self.LCD_ON_CURSOR)
    def hide_cursor(self):
        """Causes the cursor to be hidden."""
        self.hal_write_command(self.LCD_ON_CTRL | self.LCD_ON_DISPLAY)
    def blink_cursor_on(self):
        """Turns on the cursor, and makes it blink."""
        self.hal_write_command(self.LCD_ON_CTRL | self.LCD_ON_DISPLAY |
                               self.LCD_ON_CURSOR | self.LCD_ON_BLINK)
    def blink_cursor_off(self):
        """Turns on the cursor, and makes it no blink (i.e. be solid)."""
        self.hal_write_command(self.LCD_ON_CTRL | self.LCD_ON_DISPLAY |
                               self.LCD_ON_CURSOR)
    def display_on(self):
        """Turns on (i.e. unblanks) the LCD."""
        self.hal_write_command(self.LCD_ON_CTRL | self.LCD_ON_DISPLAY)
    def display_off(self):
        """Turns off (i.e. blanks) the LCD."""
        self.hal_write_command(self.LCD_ON_CTRL)
    def backlight_on(self):
        """Turns the backlight on.
        This isn't really an LCD command, but some modules have backlight
        controls, so this allows the hal to pass through the command.
        """
        self.backlight = True
        self.hal_backlight_on()
    def backlight_off(self):
        """Turns the backlight off.
        This isn't really an LCD command, but some modules have backlight
        controls, so this allows the hal to pass through the command.
        """
        self.backlight = False
        self.hal_backlight_off()
    def move_to(self, cursor_x, cursor_y):
        """Moves the cursor position to the indicated position. The cursor
        position is zero based (i.e. cursor_x == 0 indicates first column).
        """
        self.cursor_x = cursor_x
        self.cursor_y = cursor_y
        # Translate (x, y) into the controller's DDRAM address layout.
        addr = cursor_x & 0x3f
        if cursor_y & 1:
            addr += 0x40 # Lines 1 & 3 add 0x40
        if cursor_y & 2:
            addr += 0x14 # Lines 2 & 3 add 0x14
        self.hal_write_command(self.LCD_DDRAM | addr)
    def putchar(self, char):
        """Writes the indicated character to the LCD at the current cursor
        position, and advances the cursor by one position.
        """
        if char != '\n':
            self.hal_write_data(ord(char))
            self.cursor_x += 1
        # Wrap to the next line on overflow or an explicit newline; wrap
        # back to the top line when the last line fills up.
        if self.cursor_x >= self.num_columns or char == '\n':
            self.cursor_x = 0
            self.cursor_y += 1
            if self.cursor_y >= self.num_lines:
                self.cursor_y = 0
            self.move_to(self.cursor_x, self.cursor_y)
    def putstr(self, string):
        """Write the indicated string to the LCD at the current cursor
        position and advances the cursor position appropriately.
        """
        for char in string:
            self.putchar(char)
    def custom_char(self, location, charmap):
        """Write a character to one of the 8 CGRAM locations, available
        as chr(0) through chr(7).
        """
        location &= 0x7
        self.hal_write_command(self.LCD_CGRAM | (location << 3))
        self.hal_sleep_us(40)
        for i in range(8):
            self.hal_write_data(charmap[i])
            self.hal_sleep_us(40)
        # Restore the DDRAM cursor; writing CGRAM moved the address pointer.
        self.move_to(self.cursor_x, self.cursor_y)
    def hal_backlight_on(self):
        """Allows the hal layer to turn the backlight on.
        If desired, a derived HAL class will implement this function.
        """
        pass
    def hal_backlight_off(self):
        """Allows the hal layer to turn the backlight off.
        If desired, a derived HAL class will implement this function.
        """
        pass
    def hal_write_command(self, cmd):
        """Write a command to the LCD.
        It is expected that a derived HAL class will implement this
        function.
        """
        raise NotImplementedError
    def hal_write_data(self, data):
        """Write data to the LCD.
        It is expected that a derived HAL class will implement this
        function.
        """
        raise NotImplementedError
    def hal_sleep_us(self, usecs):
        """Sleep for some time (given in microseconds)."""
        sleep_us(usecs)
# Defines shifts or masks for the various LCD line attached to the PCF8574
MASK_RS = 0x01        # register select (0 = command, 1 = data)
MASK_RW = 0x02        # read/write select (0 = write)
MASK_E = 0x04         # enable strobe; the LCD latches on its falling edge
SHIFT_BACKLIGHT = 3   # bit position of the backlight control line
SHIFT_DATA = 4        # upper nibble carries the 4-bit LCD data bus
class I2cLcd(LcdApi):
    """Implements a HD44780 character LCD connected via PCF8574 on I2C."""
    def __init__(self, i2c, i2c_addr, num_lines, num_columns):
        """Run the HD44780 "initialization by instruction" sequence and
        switch the controller to 4-bit mode before handing over to LcdApi."""
        self.i2c = i2c
        self.i2c_addr = i2c_addr
        self.i2c.writeto(self.i2c_addr, bytearray([0]))
        sleep_ms(20) # Allow LCD time to powerup
        # Send reset 3 times
        self.hal_write_init_nibble(self.LCD_FUNCTION_RESET)
        sleep_ms(5) # need to delay at least 4.1 msec
        self.hal_write_init_nibble(self.LCD_FUNCTION_RESET)
        sleep_ms(1)
        self.hal_write_init_nibble(self.LCD_FUNCTION_RESET)
        sleep_ms(1)
        # Put LCD into 4 bit mode
        self.hal_write_init_nibble(self.LCD_FUNCTION)
        sleep_ms(1)
        LcdApi.__init__(self, num_lines, num_columns)
        cmd = self.LCD_FUNCTION
        if num_lines > 1:
            cmd |= self.LCD_FUNCTION_2LINES
        self.hal_write_command(cmd)
    def hal_write_init_nibble(self, nibble):
        """Writes an initialization nibble to the LCD.
        This particular function is only used during initialization.
        """
        byte = ((nibble >> 4) & 0x0f) << SHIFT_DATA
        self.i2c.writeto(self.i2c_addr, bytearray([byte | MASK_E]))
        self.i2c.writeto(self.i2c_addr, bytearray([byte]))
    def hal_backlight_on(self):
        """Allows the hal layer to turn the backlight on."""
        self.i2c.writeto(self.i2c_addr, bytearray([1 << SHIFT_BACKLIGHT]))
    def hal_backlight_off(self):
        """Allows the hal layer to turn the backlight off."""
        self.i2c.writeto(self.i2c_addr, bytearray([0]))
    def hal_write_command(self, cmd):
        """Writes a command to the LCD.
        Data is latched on the falling edge of E.
        """
        # High nibble first, then low nibble, each strobed via MASK_E.
        byte = ((self.backlight << SHIFT_BACKLIGHT) | (((cmd >> 4) & 0x0f) << SHIFT_DATA))
        self.i2c.writeto(self.i2c_addr, bytearray([byte | MASK_E]))
        self.i2c.writeto(self.i2c_addr, bytearray([byte]))
        byte = ((self.backlight << SHIFT_BACKLIGHT) | ((cmd & 0x0f) << SHIFT_DATA))
        self.i2c.writeto(self.i2c_addr, bytearray([byte | MASK_E]))
        self.i2c.writeto(self.i2c_addr, bytearray([byte]))
        if cmd <= 3:
            # The home and clear commands require a worst case delay of 4.1 msec
            sleep_ms(5)
    def hal_write_data(self, data):
        """Write data to the LCD."""
        byte = (MASK_RS | (self.backlight << SHIFT_BACKLIGHT) | (((data >> 4) & 0x0f) << SHIFT_DATA))
        self.i2c.writeto(self.i2c_addr, bytearray([byte | MASK_E]))
        self.i2c.writeto(self.i2c_addr, bytearray([byte]))
        byte = (MASK_RS | (self.backlight << SHIFT_BACKLIGHT) | ((data & 0x0f) << SHIFT_DATA))
        self.i2c.writeto(self.i2c_addr, bytearray([byte | MASK_E]))
        self.i2c.writeto(self.i2c_addr, bytearray([byte]))
class display_class(I2cLcd):
    """Convenience wrapper for a 16x2 HD44780 LCD on the ESP32 I2C bus."""
    # Placeholder until __init__ creates the real bus object.
    i2c = False
    def __init__(self):
        # I2C SCL GPIO22 SDA GPIO21
        self.i2c = I2C(scl=Pin(22), sda=Pin(21), freq=400000)
        # NOTE(review): assumes the LCD is the first device returned by the
        # bus scan -- confirm if other I2C peripherals are attached.
        super().__init__(self.i2c, self.i2c.scan()[0], 2, 16)
    def network(self, ssid, ip):
        # Show the connected network: SSID on line 0, IP address on line 1.
        self.clear()
        self.putstr(ssid)
        self.move_to(0,1)
        self.putstr(ip)
    def time(self, current_time):
        # Show a timestamp tuple: date (year, month, day) on line 0 and
        # time (hour, minute, second) on line 1.
        self.clear()
        self.putstr("{year}-{month}-{day}".format(year=current_time[0],month=current_time[1],day=current_time[2]))
        self.move_to(0,1)
        self.putstr("{hour}:{minute}:{second}".format(hour=current_time[3],minute=current_time[4],second=current_time[5]))
    def sensor(self, raw_temp, hal):
        # Show the raw and HAL-converted sensor readings on one line.
        self.clear()
        self.putstr("RAW:{raw_temp} HAL:{hal}".format(raw_temp=raw_temp, hal=hal))
# Module-level singleton used by the rest of the firmware.
lcd = display_class()
| [
"[email protected]"
] | |
3bcf078322ae579d964df68c431d2c8f4c29f199 | c291ba4506a8998df8d7f384c911f6a0a1294001 | /bai__61/CauTrucChuoi.py | 2cc3b1a07d8755446799000aea9455cb7b20fbed | [] | no_license | thanh-falis/Python | f70804ea4a3c127dcb7738d4e7c6ddb4c5a0a9d4 | fa9f98d18e0de66caade7c355aa6084f2d61aab3 | refs/heads/main | 2023-08-18T17:34:29.851365 | 2021-09-30T12:09:30 | 2021-09-30T12:09:30 | 398,952,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | """
- Chuỗi là tập các ký tự nằm trong nháy đơn hoặc nháy đối, hoặc 3 nháy đơn hoặc 3 nháy đôi.
Chuỗi rất quan trọng trong mọi ngôn ngữ, hầu hết ta đều gặp trường hợp xử lý chuỗi.
- Chuỗi trong Python cũng là một đối tượng, nó cung cấp một số hàm rất quan trọng:
object.method(parameter list)
-Các hàm xử lý chuỗi:
upper, lower: Xử lý in hoa, in thường
rjust: Căn lề phải
ljust: căn lề trái
center: Căn giữa
strip: Xóa khoảng trắng due thừa
startswith: Kiểm tra chuỗi có phải bắt đầu là ký tự?
endswith: Kiểm tra chuỗi có phải kết thúc là ký tự?
count: Đếm số lần xuất hiện trong chuỗi
find: Tìm kiếm chu
""" | [
"thanhelma2020|@gmail.com"
] | thanhelma2020|@gmail.com |
224ac78c0406f31d43d284416a4540599a7ba65e | be4d3714a212156533e0a305c4d0eca30ed46aeb | /Project1.py | 7244e0f21994986f7ad7934eea523e03e2e0501e | [] | no_license | lqr0717/ds | 99139d1554ed1fcc710eb1308a81003e53755074 | 00839c2bec3e67012cd7a19951766b5e0aa5b19a | refs/heads/master | 2022-12-10T07:52:23.621673 | 2020-08-28T00:41:25 | 2020-08-28T00:41:25 | 266,041,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,169 | py | import calendar
from datetime import timedelta
import pandas as pd
import numpy as np
from dateutil import relativedelta
from pandas import Series, DatetimeIndex
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
from scipy import stats
import numpy as np
from statsmodels.stats.outliers_influence import summary_table
from sklearn import metrics
from statsmodels.tsa.arima_model import ARMA, ARIMA
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# Functions #
def line_plot_find_missing_value(data, title, targets, columns, index, values):
    """For every value of *targets*, plot *values* over *index* (one line per
    *columns* value) into a single PDF named targets + title, and tally the
    missing entries of each line into the returned DataFrame."""
    missing = pd.DataFrame(index=data[columns].unique())
    with PdfPages(targets + title) as export_pdf:
        for target in data[targets].unique():
            subset = data[data[targets] == target]
            pivoted = subset.pivot(index=index, columns=columns,
                                   values=values)
            counts = pd.DataFrame(pivoted.isnull().sum())
            counts.columns = [target]
            missing = counts.merge(missing, how="outer", left_index=True, right_index=True)
            pivoted.plot()
            plt.xlabel(index)
            plt.ylabel(values)
            plt.title(target)
            export_pdf.savefig()
            plt.close()
    return missing
def detect_outlier(data, title, criteria1, criteria2, threshold, values):
    """Write a report of z-score outliers in *values* to the file *title*.

    For every (criteria1, criteria2) group, values whose absolute z-score
    exceeds *threshold* are listed together with the group mean and the
    offending z-scores. Groups with no outliers produce no output line.
    """
    outlier_note = ""
    for group1 in data[criteria1].unique():
        for group2 in data[criteria2].unique():
            subset = data[(data[criteria2] == group2) & (data[criteria1] == group1)]
            mean = np.mean(subset[values])
            std = np.std(subset[values])  # population std (ddof=0), as before
            outliers = []
            z_outliers = []
            # (The original had a no-op "threshold = threshold" here.)
            for value in subset[values]:
                z_score = (value - mean) / std
                if np.abs(z_score) > threshold:
                    outliers.append(value)
                    z_outliers.append(z_score)
            if outliers:
                outlier_note += (
                    group2 + " in " + group1 + " is "
                    + " ".join(str(v) for v in outliers)
                    + " when mean is " + str(round(mean, 2))
                    + " and z-score is "
                    + " ".join(str(round(z, 2)) for z in z_outliers)
                    + "\n"
                )
    # Context manager guarantees the report file is closed even on error
    # (the original opened/closed it manually).
    with open(title, "w") as outlier_output:
        outlier_output.write(outlier_note)
def correlation_plot(data, title, targets, columns, index, values, save_csv, plot_print):
    """For each value of *targets*, compute the correlation matrix of *values*
    across *columns*, optionally dump it to CSV, and optionally render a
    heatmap page into a single PDF named targets + title."""
    with PdfPages(targets + title) as export_pdf:
        for target in data[targets].unique():
            subset = data[data[targets] == target]
            pivoted = subset.pivot(index=index, columns=columns,
                                   values=values)
            corr = pivoted.corr()
            if save_csv == True:  # original truth test kept verbatim
                corr.to_csv(target[0:4] + r'_correlation.csv')
            if plot_print == True:
                # Draw the heatmap with the mask and correct aspect ratio
                cmap = sns.diverging_palette(220, 10, as_cmap=True)
                sns.set(font_scale=0.4)
                ax = plt.axes()
                sns.heatmap(corr, vmax=1, vmin=0.5, cmap=cmap,
                            square=True, linewidths=.3, cbar_kws={"shrink": .5}, annot=True)
                ax.set_title(target)
                export_pdf.savefig()
                plt.close()
def encoder(dataset, catFeatures, qtyFeatures):
    """One-hot encode *catFeatures* (dropping the first level of each) while
    keeping *qtyFeatures* untouched; other columns are discarded."""
    selected = dataset[catFeatures + qtyFeatures]
    return pd.get_dummies(selected, columns=catFeatures, drop_first=True)
def add_variable(data, variable_name, rolling_window):
    """Append the named feature column to *data* and return it.

    "Month"/"Year" are derived from the 'Period Begin' column in place
    (rolling_window is ignored for those). The three rolling features read
    the module-global `values` target column over a *rolling_window*-row
    window; for those, the first *rolling_window* rows are dropped and the
    rolling output is re-indexed onto the remaining rows.
    NOTE(review): for "Mean(Past3Months)"/"Median(SameMonth)" the rolling
    output appears one row longer than the trimmed frame -- verify the
    index re-alignment against real data.
    """
    if variable_name == "Month":
        data[variable_name] = DatetimeIndex(data['Period Begin']).month
    if variable_name == "Year":
        data[variable_name] = DatetimeIndex(data['Period Begin']).year
    if variable_name in ["Mean(PastYear)" ,"Median(SameMonth)" ,"Mean(Past3Months)"]:
        if variable_name == "Mean(Past3Months)":
            # Weighted mean of the 3 most recent rows in each window.
            weights = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0.3, 0.3, 0.4])
            x = data[values].rolling(window=rolling_window).apply(lambda prices: np.dot(prices, weights) / weights.sum(), raw=True).dropna()
        if variable_name == "Median(SameMonth)":
            # Weight 1 on the oldest row of each window (12 rows back).
            weights= np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
            x = data[values].rolling(window=rolling_window).apply(lambda prices: np.dot(prices, weights) / weights.sum(), raw=True).dropna()
        if variable_name == "Mean(PastYear)":
            x = data[values].rolling(window=rolling_window).mean().dropna()
            # Drop the last window so the mean only uses strictly past rows.
            x = x.drop(x.index[len(x) - 1])
        data = data.iloc[rolling_window:]
        x.index = data.index
        data[variable_name] = x
    return data
def backward_elimination(x, Y, pvalue_thred, columns):
    """Iteratively drop the regressor with the highest OLS p-value until
    every remaining p-value is <= *pvalue_thred*.

    x: 2-D array of regressors (observations x variables).
    Y: target vector.
    columns: array of column names kept in sync with x.
    Returns the reduced (x, columns) pair.
    """
    var_count = len(x[0])
    for i in range(0, var_count):
        regressor_OLS = sm.OLS(Y, x).fit()
        maxVar = max(regressor_OLS.pvalues).astype(float)
        if maxVar <= pvalue_thred:
            # All remaining variables are significant; further refits could
            # never remove anything, so stop (the original kept refitting
            # the same model and made a dead regressor_OLS.summary() call).
            break
        # NOTE(review): if several columns tie at maxVar, indices shift
        # after np.delete -- kept identical to the original behavior.
        for j in range(0, var_count - i):
            if regressor_OLS.pvalues[j].astype(float) == maxVar:
                x = np.delete(x, j, 1)
                columns = np.delete(columns, j)
    return x, columns
# load raw data
file = "training_data.csv"
file_test = "test.csv"
data = pd.read_csv(file)
data_test = pd.read_csv(file_test)
print(data.head())
print(data.shape)
print(data.dtypes)
print(data.isnull().sum())
# Parse the period columns as datetimes and strip thousands separators from
# the target price column so it can be used numerically.
data["Period Begin"] = pd.to_datetime(data["Period Begin"])
data["Period End"] = pd.to_datetime(data["Period End"])
data["Median Sale Price (in 000's of dollars)"] = data["Median Sale Price (in 000's of dollars)"].str.replace(",", "").astype(float)
print(data.dtypes)
print(data.head())
print(data.shape)
# first look of data & find missings/outliers
line_plot_find_missing_value(data,"_initial_run.pdf", "City","Property Type","Period Begin", "Median Sale Price (in 000's of dollars)")
missing_value = line_plot_find_missing_value(data, "_initial_run.pdf","Property Type","City","Period Begin", "Median Sale Price (in 000's of dollars)")
missing_value.to_csv(r'Missing_Value.csv')
detect_outlier(data,"Outliers.txt", "City", "Property Type",4, "Median Sale Price (in 000's of dollars)")
# Data Cleansing
## update outlier with mean
values = "Median Sale Price (in 000's of dollars)"
# Replace the implausible Olympia condo price (> 1000) with the mean of the
# remaining plausible prices for that segment.
data[values][(data[values] > 1000) & (data["City"] == "Olympia") & (data["Property Type"] == "Condo/Co-op")] = np.mean(data[values][(data[values] < 1000) & (data["City"] == "Olympia") & (data["Property Type"] == "Condo/Co-op")])
## fillin missing value except for multi family property type, "Mercer Island" - Townhouse missing 104 majority missing ,"Snoqualmie" - Condo/Co-op missing 61 missing more than half
data_n = pd.DataFrame()
unique_cal = pd.DataFrame(index = data["Period Begin"].unique())
unique_cal = unique_cal.sort_index(axis = 0)
null_dict = {"Townhouse":["Issaquah","Kenmore","Olympia","Snoqualmie"], "Condo/Co-op":[ "Kenmore","Olympia","Mercer Island"]}
# For each (property type, city) pair with gaps: re-index onto the full
# period calendar, interpolate interior gaps, then forward/backward fill
# the edges, and splice the repaired segment back into `data`.
for key, items in null_dict.items():
    for item in items:
        data_1 = data[(data["City"] == item) & (data["Property Type"] == key)]
        data_1 = data_1.set_index("Period Begin", drop=False)
        data_1 = data_1.sort_index(axis=0)
        index_drop = data[(data["City"] == item) & (data["Property Type"] == key)].index
        data.drop(index_drop, inplace=True)
        data_1 = data_1.merge(unique_cal,how = "outer",left_index=True, right_index=True)
        data_1[values]= data_1[values].interpolate()
        data_1[values] = data_1[values].fillna(method = "ffill")
        data_1[values] = data_1[values].fillna(method = "bfill")
        data_1["City"] = "".join(data_1["City"].dropna().unique())
        data_1["Property Type"] = "".join(data_1["Property Type"].dropna().unique())
        data_1['Period Begin'][data_1['Period Begin'].index[data_1['Period Begin'].apply(np.isnan)]] = data_1['Period Begin'].index[data_1['Period Begin'].apply(np.isnan)]
        data = data.append(data_1)
print(data.shape)
data = data.set_index("Period Begin", drop=False)
data = data.sort_index(axis=0)
# check data one more time & check correlation
line_plot_find_missing_value(data, "_fillin_null_run.pdf","City","Property Type","Period Begin", "Median Sale Price (in 000's of dollars)")
correlation_plot(data,"_correlation.pdf", "City", "Property Type","Period Begin","Median Sale Price (in 000's of dollars)", False, True)
correlation_plot(data,"_correlation.pdf", "Property Type","City","Period Begin", "Median Sale Price (in 000's of dollars)", False, True)
# reformat data adding rolling avg & set X & Y & feature selection & backward elimination
data_length_threshold = len(data.index.unique())*0.8
rolling_window = 12
x_corr_thred = 0.9
pvalue_thred = 0.05
variable_selected = ""
result_table = pd.DataFrame(columns = ['City', 'Property Type', "R2","MSE", "MAE"])
data_ts = pd.DataFrame()
data_pred_jan = pd.DataFrame()
criteria1 = "City"
criteria2 = "Property Type"
# Fit one linear model per (city, property type) segment that has enough
# history, then roll the model forward 4 months to predict future medians.
for ii in data[criteria1].unique():
    for jj in data[criteria2].unique():
        # ii = "Olympia"
        # jj = "Townhouse"
        data_1 = data[(data[criteria2] == jj) & (data[criteria1] == ii)]
        data_1 = data_1.sort_index(axis=0)
        if(len(data_1.index.unique())>data_length_threshold):
            # trandform data & create X
            data_1 = add_variable(data_1, "Month", 0)
            data_1 = add_variable(data_1, "Year", 0)
            data_1 = add_variable(data_1, "Mean(PastYear)", rolling_window)
            data_1 = add_variable(data_1, "Mean(Past3Months)", rolling_window)
            data_1 = add_variable(data_1, "Median(SameMonth)", rolling_window)
            X = data_1[["Mean(PastYear)","Median(SameMonth)","Mean(Past3Months)", "Month","Year"]]
            y = data_1[[values]]
            # X feature selection
            corr = X.corr()
            columns = np.full((corr.shape[0],), True, dtype=bool)
            for i in range(corr.shape[0]):
                for j in range(i + 1, corr.shape[0]):
                    if corr.iloc[i, j] >= x_corr_thred:
                        if columns[j]:
                            columns[j] = False
            selected_columns = X.columns[columns]
            X = X[selected_columns]
            selected_columns = selected_columns[:].values
            X_modeled, selected_columns = backward_elimination(X.iloc[:,:].values, y.iloc[:,0].values, pvalue_thred,selected_columns)
            X_selected = pd.DataFrame(X_modeled, columns=selected_columns)
            cols = ""
            for col in selected_columns:
                cols += col + " "
            variable_selected = variable_selected + jj + " in " + ii + " uses variable " + cols + "\n"
            # Run linear regression model on selected variable - R2, MAE, MSE
            linreg = LinearRegression(fit_intercept=True)
            re = linreg.fit(X_selected, y)
            y_train_pred = linreg.predict(X_selected)
            y_train_pred = np.array(y_train_pred[:,0])
            y_actual = y.iloc[:,0].values
            mse = np.sqrt(sum((y_train_pred - y_actual)**2) / len(y_train_pred))
            mae = sum(abs(y_train_pred - y_actual)) / len(y_train_pred)
            result_table = result_table.append({'City' :ii ,'Property Type':jj, "R2":metrics.r2_score(y, y_train_pred), "MSE": mse, "MAE": mae} , ignore_index=True)
            # predict test data
            data_t = data_1.tail(12)
            t_df1 = pd.DataFrame([[ii, jj, data_t['Period Begin'][-1]+ relativedelta.relativedelta(months=1), 0, 0, 0, 0, 0, 0, 0]], columns = data_t.columns)
            t_df2 = pd.DataFrame([[ii, jj, data_t['Period Begin'][-1]+ relativedelta.relativedelta(months=2), 0, 0, 0, 0, 0, 0, 0]], columns = data_t.columns)
            t_df3 = pd.DataFrame([[ii, jj, data_t['Period Begin'][-1]+ relativedelta.relativedelta(months=3), 0, 0, 0, 0, 0, 0, 0]], columns = data_t.columns)
            t_df4 = pd.DataFrame([[ii, jj, data_t['Period Begin'][-1]+ relativedelta.relativedelta(months=4), 0, 0, 0, 0, 0, 0, 0]], columns = data_t.columns)
            data_t = pd.concat([ data_t, t_df1, t_df2, t_df3, t_df4], ignore_index=True)
            # Recursive forecast: each predicted month is written back into
            # data_t so the next month's rolling features can use it.
            for i in [0, 1, 2,3]:
                data_tt = data_t.iloc[rolling_window:]
                for var in selected_columns:
                    data_temp = add_variable(data_t, var, rolling_window)
                    data_tt[var] = data_temp[var]
                n = list(selected_columns)
                X_pred = pd.DataFrame(data_tt[n].iloc[i]).transpose()
                y_pred = linreg.predict(X_pred)
                data_t[values].iloc[i+12] = y_pred
            data_ts = data_ts.append(data_t[[criteria1, criteria2,"Period Begin" ,"Period End",values]].tail(4))
            data_ts["Period End"] = data_ts["Period Begin"] + timedelta(-1)
            data_ts["Period End"] = data_ts["Period End"] .apply(lambda date: date+relativedelta.relativedelta(months=1))
            for_test = pd.DataFrame(data_t[[criteria1, criteria2, values]].iloc[-1]).transpose()
            data_pred_jan = data_pred_jan.append(for_test)
# Output prediction for 4 months result
data_ts.to_csv(r'Prediction_Result_Final.csv')
# Compare prediction to actual Jan 2020
data_pred_jan = data_pred_jan.set_index([criteria1, criteria2])
data_pred_jan.columns = ["Predicted_Median"]
data_test = data_test.set_index([criteria1, criteria2])
test_result_validation = data_pred_jan.merge(data_test,how = "outer", left_index=True, right_index=True)
test_result_validation[values] = test_result_validation[values].str.replace("K", "")
test_result_validation[values] = test_result_validation[values].str.replace("$", "")
test_result_validation[values] = test_result_validation[values].str.replace(",", "").astype(float)
test_result_validation = test_result_validation.reset_index()
test_result_validation.to_csv(r'Test_Prediction_Comparison.csv')
plt.plot( 'Predicted_Median', data=test_result_validation, marker='o', markerfacecolor='blue', markersize=12, color='skyblue', linewidth=4)
plt.plot( values, data=test_result_validation, marker='', color='olive', linewidth=2)
plt.legend()
# print out Training result
r2_table = result_table.pivot(index='Property Type', columns='City', values="R2")
mse_table = result_table.pivot(index='Property Type', columns='City', values="MSE")
mae_table= result_table.pivot(index='Property Type', columns='City', values="MAE")
r2_table.to_csv(r'R2_result.csv')
mse_table.to_csv(r'MSE_Training_result.csv')
mae_table.to_csv(r'MAE_Training_result.csv')
variable_note = open("Variables_Selected_Output.txt", "w")
variable_note.write(variable_selected)
variable_note.close()
| [
"[email protected]"
] | |
73d7e289d0aaa96ccf12b9862628bdf616f49aa5 | 5013dd7116a8455ddbb3a62bb6129a43a8c974fa | /django_project/urls.py | e931f0537a31d8f40c36b48d3b19152caf4c4702 | [] | no_license | priyanka36/BlogApp | 95acebf296d1637906334bc821cbdf677eebda73 | 472711198981961efab134f1fef86aa3d004550b | refs/heads/master | 2023-02-21T10:37:05.243040 | 2023-02-17T08:28:47 | 2023-02-17T08:28:47 | 313,916,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | """django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from users import views as user_views
# URL routes: admin site, user registration/auth (Django's built-in auth
# views with custom templates from the users app), the blog app at the
# root, and the password-reset flow.
urlpatterns=[
    path('admin/', admin.site.urls),
    path('register/',user_views.register,name='register'),
    path('profile/',user_views.profile,name='profile'),
    path('login/',auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
    path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
    # Root URL delegates to the blog app's urlconf.
    path('', include('blog.urls')),
    path('password-reset/',auth_views.PasswordResetView.as_view(template_name='users/password-reset.html'), name='password-reset'),
    path('password-reset-done/',auth_views.PasswordResetDoneView.as_view(template_name='users/password_reset_done.html'), name='password_reset_done'),
    path('password_reset_confirm/<uidb64>/<token>/',auth_views.PasswordResetConfirmView.as_view(template_name='users/password_reset_confirm.html'), name='password_reset_confirm'),
]
# Serve user-uploaded media through Django itself in development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
c4af6ac4f3b8b7b14414c155611811aa2a2eea11 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /carbonui/util/sortUtil.py | 630c63b8ab979b409b262d7bf56fdf40770b56a4 | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\carbonui\util\sortUtil.py
def Sort(lst):
    """Case-insensitively sort *lst* in place and return the same list.

    Python 2 only: relies on the cmp-style comparator argument to
    list.sort and on the cmp builtin, both removed in Python 3.
    """
    lst.sort(lambda x, y: cmp(str(x).upper(), str(y).upper()))
    return lst
def SortListOfTuples(lst, reverse = 0):
    """Sort (key, value) pairs by key and return just the values."""
    ordered = sorted(lst, key=lambda pair: pair[0], reverse=reverse)
    return [pair[1] for pair in ordered]
def SortByAttribute(lst, attrname = 'name', idx = None, reverse = 0):
    """Sort items (or item[idx] when idx is given) by the named attribute,
    returning the reordered items."""
    keyed = []
    for entry in lst:
        target = entry if idx is None else entry[idx]
        keyed.append((getattr(target, attrname, None), entry))
    return SortListOfTuples(keyed, reverse)
| [
"[email protected]"
] | |
caede67f4b4110575ba80d4905b591f525a1d1aa | 701f160e6fae80575b8210ac5ae14bc773231cff | /facial_landmarks_detection.py | 6b67f7371cf37548a86e64927cbea991c02fef44 | [] | no_license | pombero/Computer_Pointer_Controller | 6339d6e755d7cf17f1a9ca727e26edfb71589637 | d67f8f4302cdf09da3998ca2431a2c12fd8aee1c | refs/heads/master | 2022-11-22T01:43:36.225704 | 2020-07-22T23:25:15 | 2020-07-22T23:25:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,520 | py | import cv2
from openvino.inference_engine.ie_api import IENetwork, IECore
import pprint
# To crop the eyes from the face, we use a square sized with 1/5 the width of the face.
EYE_FACE_COEF = 0.2  # eye-crop edge length as a fraction of the face width
class Facial_Landmarks_Detection:
    """OpenVINO wrapper for a facial-landmarks model: loads the IR network,
    runs inference on a face crop, and returns the two eye crops."""
    def __init__(self, model_name, device='CPU', extensions=None, perf_counts="False"):
        # model_name is the IR path without extension; .bin/.xml are derived.
        self.model_weights = model_name + '.bin'
        self.model_structure = model_name + '.xml'
        self.device = device
        self.extensions = extensions
        try:
            self.model = IENetwork(self.model_structure, self.model_weights)
        except Exception as e:
            raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
        self.input_name = next(iter(self.model.inputs))
        self.input_shape = self.model.inputs[self.input_name].shape
        self.output_name = next(iter(self.model.outputs))
        self.output_shape = self.model.outputs[self.output_name].shape
        self.net = None  # populated by load_model()
        # Optional pretty-printer for per-layer performance counters.
        # NOTE: perf_counts is compared as the *string* "True", not a bool.
        self.pp = None
        if perf_counts == "True":
            self.pp = pprint.PrettyPrinter(indent=4)
    def load_model(self):
        """Load the network onto the target device, with an optional extension."""
        core = IECore()
        if self.extensions != None:
            core.add_extension(self.extensions, self.device)
        self.net = core.load_network(network=self.model, device_name=self.device, num_requests=1)
    def predict(self, image):
        """Run inference on *image* (a face crop) and return (left_eye, right_eye)."""
        preprocessed_image = self.preprocess_input(image)
        output = self.net.infer({self.input_name: preprocessed_image})
        if self.pp is not None:
            self.pp.pprint(self.net.requests[0].get_perf_counts())
        return self.preprocess_output(next(iter(output.values()))[0], image)
    def check_model(self):
        raise NotImplementedError
    def preprocess_input(self, image):
        """Resize an HxWxC image to the network input size and reorder to 1xCxHxW."""
        image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
        image = image.transpose((2, 0, 1))
        image = image.reshape(1, *image.shape)
        return image
    def preprocess_output(self, outputs, image):
        """Crop square eye patches around the first two landmark pairs.

        The landmark coordinates in *outputs* are scaled by the image width/
        height before use, i.e. they are treated as normalized (x0, y0) for
        the left eye and (x1, y1) for the right eye. The crop edge length is
        EYE_FACE_COEF times the face width.
        """
        width = int(image.shape[1])
        height = int(image.shape[0])
        eye_square_size = int(width * EYE_FACE_COEF)
        left_eye = cv2.getRectSubPix(image, (eye_square_size, eye_square_size), (outputs[0] * width + eye_square_size / 2, outputs[1] * height + eye_square_size / 2))
        right_eye = cv2.getRectSubPix(image, (eye_square_size, eye_square_size), (outputs[2] * width + eye_square_size / 2, outputs[3] * height + eye_square_size / 2))
        return left_eye, right_eye
"[email protected]"
] | |
5a8316a2dbb0ec3afabf9380ee37c547a9ff9e04 | f163ed5554ea2be144dbeb3b9189aec6fbf18f93 | /6035/main.py | 1caa2a3f2d9fdbe8757a519e03198c94156ccf1a | [] | no_license | YoungWoongJoo/CodeUp-Python-basic-100 | dd72ee22a121a3bc56171c963e5dc7f5a3e81d57 | c0f45263134d4ab836a127c5f13c031e7c972a6c | refs/heads/master | 2023-03-30T01:38:51.157600 | 2021-04-02T19:03:19 | 2021-04-02T19:03:19 | 352,991,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | """
실수 2개(f1, f2)를 입력받아 곱을 출력하는 프로그램을 작성해보자.
입력
2개의 실수가 공백으로 구분되어 입력된다.
출력
첫 번째 실수와 두 번째 실수를 곱한 값을 출력한다.
"""
# Parse the two space-separated real numbers and print their product.
f1, f2 = map(float, input().split())
print(f1 * f2)
"[email protected]"
] | |
b104099fe37008bf6fd58a2e92fcf094aafb1fcf | f3d1198ddb6358d0ed57e027e62a24c084c0a378 | /NCS/repeat.py | 9e33d717218a32ce32b848f37cf3a4b901cd675b | [] | no_license | MingjiaLi666/NCS | 0d1ac6a3fa238f66984a7d73da84f1e6c9f47b55 | 7bb684a6b6044dad20c89141e04b19be44538943 | refs/heads/master | 2021-02-06T07:36:59.032447 | 2020-03-03T06:20:49 | 2020-03-03T06:20:49 | 243,893,795 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | from src.policy import Policy
import numpy as np
import pickle
import gym
import os
class Visualizer(object):
def __init__(self, game, network, train_directory):
self.game = game
env_name = '%sNoFrameskip-v4' % game
env = gym.make(env_name)
# env = gym.wrappers.Monitor(env, '/tmp/temp_%s' % game, mode='evaluation', force=True)
vb_file = os.path.join(train_directory, "vb.npy")
vb = np.load(vb_file)
parameters_file = 'parameters_81'
self.policy = Policy(env, network, "relu")
parameters_path = os.path.join(train_directory, parameters_file)
print('Using parameters file %s \n' % parameters_path)
with open(parameters_path, 'rb') as f:
parameters = pickle.load(f)['parameters']
self.policy.set_parameters(parameters)
self.policy.set_vb(vb)
def play_game(self):
rews = [0]*100
for i in range(100):
rew,step = self.policy.rollout()
rews[i] = rew
print(np.mean(rews))
print(np.max(rews))
print(rews)
if __name__ == '__main__':
vis = Visualizer('Venture', 'Nature', train_directory='./logs_mpi/Venture/Baseline/Nature/8/50/0.010000/1.000000/1.000000/NCSVenture2')
vis.play_game()
| [
"[email protected]"
] | |
4e4ed47d359398fb037c8b798ffec59ab1e4919b | 402879a5e26875dd4b3adfd359588abe62cc85b3 | /pynumber1.py | d5b030d89c63ac028cddfc052c0633601dc71f65 | [] | no_license | RiddhimGupta/MachineLearning | 5becb7012755e98d4492e5c2fc44996deb6cb89c | 4f597bbe7fcbdd42dbad2fb30d8210b7c24dd16e | refs/heads/master | 2021-07-11T16:10:41.234894 | 2020-08-27T15:28:08 | 2020-08-27T15:28:08 | 192,696,649 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | #!/usr/bin/python3
import numpy as np
import random
row=int(input("Enter rows:"))
col=int(input("Enter columns: "))
f=open("array.txt","w")
#Inserting random values in array
x=np.random.randint(9,size=(row,col))
print(x)
f.write('Array\n')
#Saving integer data to array
np.savetxt(f, x,fmt='%d')
| [
"[email protected]"
] | |
4d7e2d426d0c30ad544dac762ce0bbe607ad82b0 | 3bdb841d577031608b24eb63f6c511738d49e373 | /app/main/errors.py | 3f0cacbba20270a9e53b83ff07d6551418acf5d5 | [] | no_license | wangyuhuiever/blog | f0ea1d718a5c24d4647808b98a7e0e30519f1d6e | 752c8f7cce5e03993c2cecbcf3a4f5eba297154f | refs/heads/master | 2022-12-22T19:21:42.445162 | 2017-06-15T03:26:43 | 2017-06-15T03:26:43 | 93,734,963 | 0 | 0 | null | 2022-09-16T17:45:09 | 2017-06-08T10:03:43 | Python | UTF-8 | Python | false | false | 1,002 | py | from . import main
from flask import render_template, request, jsonify
@main.app_errorhandler(404)
def page_not_found(e):
if request.accept_mimetypes.accept_json and \
not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'not found'})
response.status_code = 404
return response
return render_template('404.html'), 404
@main.app_errorhandler(500)
def internal_server_error(e):
if request.accept_mimetypes.accept_json and \
not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'internal server error'})
response.status_code = 500
return response
return render_template('500.html'), 500
@main.app_errorhandler(403)
def forbidden(e):
if request.accept_mimetypes.accept_json and \
not request.accept_mimetypes.accept_html:
response = jsonify({'error': 'forbidden'})
response.status_code = 403
return response
return render_template('403.html'), 403 | [
"[email protected]"
] | |
90675f8cbc670d60240bf5b21a878105c2ae2522 | fc286168bb8a9669822934bcde70c099b872d1e0 | /Experimentation/LSTM/LR5/showHistogram.py | 0c84ae092d66881034b934f92c46af1e485266e6 | [] | no_license | ishankapnadak/Vector-Based-Navigation | 70b98a1581e82c80edb1e24ff6aad9c617c8f772 | 5607b67afe2e44a9f21f8b7ded6d30a16942dae3 | refs/heads/main | 2023-08-30T13:02:04.202756 | 2021-11-04T06:50:56 | 2021-11-04T06:50:56 | 386,834,174 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,683 | py | import numpy as np
import os
import matplotlib.pyplot as plt
import os
from scipy.signal import correlate2d
from ratSimulator import RatSimulator
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def showHistogram(agent, dataGenerator, num_traj, num_steps, llu, slu, tlu, bins):
#factor=2.2/bins
#activityMap=np.zeros((llu, bins, bins))
#counter = 0
X=np.zeros((num_traj,num_steps,3))
positions=np.zeros((num_traj,num_steps,2))
angles=np.zeros((num_traj,num_steps,1))
hist = np.zeros((num_steps,num_traj))
env=RatSimulator(num_steps)
print(">>Generating trajectory")
for i in range(num_traj):
vel, angVel, pos, angle =env.generateTrajectory()
X[i,:,0]=vel
X[i,:,1]=np.sin(angVel)
X[i,:,2]=np.cos(angVel)
positions[i,:]=pos
#init_X=np.zeros((num_traj,8,pcu + hcu))
home_location = pos[0]
#for i in range(8):
#init_X[:, i, :pcu]=dataGenerator.computePlaceCellsDistrib(positions[:,(i*100)], place_cell_centers)
#init_X[:, i, pcu:]=dataGenerator.computeHeadCellsDistrib(angles[:,(i*100)], head_cell_centers)
print(">>Computing Actvity maps")
#Feed 500 examples at time to avoid memory problems. Otherwise (10000*100=1million matrix)
batch_size=1
for startB in range(0, num_traj, batch_size):
endB=startB+batch_size
home_X, home_Y = home_location
#fig=plt.figure(figsize=(12,12))
#ax=fig.add_subplot(111)
#ax.set_title("Trajectory agent")
#ax.plot(pos[:,0], pos[:,1])
#ax.set_xlim(0,2.2)
#ax.set_ylim(0,2.2)
#ax.grid(True)
#ax.plot(home_X, home_Y, 'o')
#Divide the sequence in 100 steps.
#print(current_X)
#Retrieve the inputs for the timestep
xBatch=X[startB:endB]
#print(xBatch.shape)
#When the timestep=0, initialize the hidden and cell state of LSTm using init_X. if not timestep=0, the network will use cell_state and hidden_state
feed_dict={ agent.X: xBatch,
#agent.placeCellGround: init_X[startB:endB, (startT//100), : pcu],
#agent.headCellGround: init_X[startB:endB, (startT//100), pcu :]
agent.keepProb : 1
}
norm = agent.sess.run([agent.OutputNorm], feed_dict=feed_dict)
norm = norm[0].reshape((num_steps))
#print(norm)
#print(norm.shape)
#norm_end = norm[99]
hist[startB] = norm
plt.imshow(hist, cmap="inferno", vmin=0, vmax=2.2)
plt.colorbar()
plt.savefig('histogram.jpg')
plt.clf() | [
"[email protected]"
] | |
ece59419e4396886a8756b065d0c0178e29d9758 | 6b7b08d3bb417602daaf42e8c59b817bf97a9560 | /httpdetector/filters.py | 7b3c8db13b10d5e8be50ce15377d940703f7970b | [] | no_license | CogniBuild/Campus.FakeDetector | 6d17f15b86d082b2d3a53e07dd4f17c2981fd0d7 | 9a0cf540ea91e4e2ed7e4a6326c3e45b563c896a | refs/heads/master | 2023-05-01T23:13:29.044566 | 2021-05-03T23:16:07 | 2021-05-03T23:16:07 | 335,558,878 | 2 | 0 | null | 2021-05-03T21:31:35 | 2021-02-03T08:38:53 | Python | UTF-8 | Python | false | false | 236 | py | import numba
import numpy as np
@numba.njit(parallel=True, fastmath=True)
def rgb2gray(rgb: np.array) -> np.array:
r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
| [
"[email protected]"
] | |
ab41880b43db3d2cba9673d0c901a444e21a4bb1 | 867838b7fa64351d933f003f0daca555a6071b52 | /www/views.py | 5ee6a26a6a67cccd3f569d11962797fc2d31fac2 | [] | no_license | RLavoisier/app | fa978b5130a785434eabd9d7e86220e0b6df947b | e361881a2b03e63fd0363660c1590088d0511beb | refs/heads/master | 2021-06-18T09:10:43.398262 | 2017-07-06T13:34:50 | 2017-07-06T13:34:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | from django.shortcuts import render
from order.helpers.helper_orders import HOrder
h_order = HOrder()
def index(request):
return render(request, 'orders.html', locals())
def order(request, id):
order = h_order.getOrderById(id)
return render(request, 'order-detail.html', locals())
def new(request):
return render(request, 'new_order.html', locals())
| [
"[email protected]"
] | |
97b340bc6e3f0c151b5b2029d8896a92fb6b2528 | 4c413f3787e66155d90a3cdce87b6527b7dae459 | /app/cokkies.py | c235f510a4d7b3ddfdb4b9af5b90d9f857cb7cc3 | [] | no_license | priyanshukumarcs049/flask | 61b0339c9dd5afc62625d35b3b42902383d95402 | 5d6828d87cd648d050fec78465bcb8e607d1530e | refs/heads/master | 2020-04-01T10:05:38.901968 | 2018-10-17T12:08:05 | 2018-10-17T12:08:05 | 153,102,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | from flask import Flask, render_template, request, make_response
app = Flask(__name__)
@app.route('/')
def index():
return render_template('cookie_index.html')
@app.route('/setcookie', methods = ['POST', 'GET'])
def setcookie():
if request.method == 'POST':
user = request.form("name")
resp = make_response(render_template('readcookies.html'))
resp.set_cookie('userID', user)
return resp
@app.route('/getcookie')
def getcookie():
name = request.cookies.get('userID')
return '<h1>Hii ' +name+'</h1>'
if __name__ == '__main__':
app.run(port = 5006, debug = True)
| [
"[email protected]"
] | |
bc1e3c5d20873ca9391c87f732e80a13dfbc1c5d | d93c625c9a907aa3b282a9bbb5c1629df0aafa47 | /CarControl/control.py | 7b7032b5a79bc9c9efd8c791c8178c6caaf9fac2 | [
"Apache-2.0"
] | permissive | angrajales/SDC_Eafit | 4baac4b439a6276537e99d8f004acd68082b0659 | ecbde0235100655baa1cbd189ab02ac2c9f35e67 | refs/heads/master | 2020-05-23T22:18:18.292455 | 2019-05-17T19:27:48 | 2019-05-17T19:27:48 | 173,021,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | #################### TO DO #######################
| [
"[email protected]"
] | |
7e4122c506b21e8a8186d819d392ab83c5bf0fd2 | 2fb70181b485028a219e719a2f3f922458e3b9bd | /ecomstore/apps/catalog/apps.py | 4780c3552a4c85418e987d6c6429682e831131e9 | [] | no_license | Sharmiko/ecomstore | 87b438586f4c6600cdcf122f7a05776e5ce95da8 | 3b94fab9880cad196d9b319dc9a7b5ece37612c5 | refs/heads/master | 2023-03-23T05:32:42.053107 | 2021-03-06T17:46:30 | 2021-03-06T17:46:30 | 305,790,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | from django.apps import AppConfig
class CatalogConfig(AppConfig):
name = 'ecomstore.apps.catalog'
| [
"[email protected]"
] | |
40ef952b1158ae785ea25449e54550529aa42607 | 10feff5f938ef02732e2f7a529c9a7b1b422be26 | /stock_list_and_search.py | 0f2054ed8b0b86243c488ac3dcd0608855999324 | [] | no_license | Miren16/MNMS-Financials-App | 4f1015227c7d4d852ffc732ddea8f3780149ed16 | 7e52c1b62d65cc11351340abade117cf6ba2876b | refs/heads/master | 2023-04-26T07:02:03.115858 | 2021-05-18T01:05:29 | 2021-05-18T01:05:29 | 368,360,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | import pickle
pickled_list_filename = 'pickled_stock_list'
imported_pickled_stock_list = open(pickled_list_filename, 'rb')
new_list_of_stock_lists = pickle.load(imported_pickled_stock_list)
imported_pickled_stock_list.close()
#searches all the companies in the list of lists at position [x], looking for a partial string match with the ticker at [x][0] or name at [x][1]
# Note that some companies have official corporate names different from a more well-known past name or popular moniker (eg, Google is not the name
# of the corporation, it's Alphabet, Incorporated. Since the stock ticker is GOOGL, searching "Google" will not return any valid results. A simple
# if statement can be added for "Google" in light of it's popularity, but hardcoding exceptions for an insignificant portion of the >8000 stocks in
# the list is not feasible.)
def search_for_company(search_string):
search_string = search_string.casefold()
list_of_matches = []
for x in range(len(new_list_of_stock_lists)):
lowercase_ticker_compare = new_list_of_stock_lists[x][0].casefold()
lowercase_company_compare = new_list_of_stock_lists[x][1].casefold()
if search_string in lowercase_ticker_compare or search_string in lowercase_company_compare:
list_of_matches.append(new_list_of_stock_lists[x])
if len(list_of_matches) == 0:
return ["No matches found!"]
else:
return(list_of_matches)
| [
"[email protected]"
] | |
edbc372709806c119208089061bdcfc64460c66f | e69feafbf7bea314db5286937de0b3987cbd2b84 | /venv/Scripts/lista_circular_doble_enlazada.py | 2ee44e3bc8c913703911f19dab614f7f4b2f7b59 | [] | no_license | jcdlacruz/umg-SOI-1er-parcial | 51a84abbb062bd93d34f60b10259bc3db35eb196 | df637f21128847a4b7c8c2a39be5e6dc5a8b5982 | refs/heads/master | 2022-11-30T09:08:01.864244 | 2020-08-09T19:33:59 | 2020-08-09T19:33:59 | 286,296,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,961 | py | from nodo import Nodo
class ListaCirculaDobleEnlazada:
def __init__(self):
self.primero = None
self.ultimo = None
def vacia(self):
if self.primero == None:
return True
else:
return False
def agregar_inicio(self, dato):
if self.vacia():
self.primero = self.ultimo = Nodo(dato)
else:
aux = Nodo(dato)
aux.siguiente = self.primero
self.primero.anterior = aux
self.primero = aux
self.__unir_nodos()
def agregar_final(self, dato):
if self.vacia():
self.primero = self.ultimo = Nodo(dato)
else:
aux = self.ultimo
self.ultimo = aux.siguiente = Nodo(dato)
self.ultimo.anterior = aux
self.__unir_nodos()
def eliminar_inicio(self):
if self.vacia():
print("Estructura se encuentra vacia")
elif self.primero == self.ultimo:
self.primero = self.ultimo = None
else:
self.primero = self.primero.siguiente
self.__unir_nodos()
def eliminar_ultimo(self):
if self.vacia():
print("Estructura se encuentra vacia")
elif self.primero == self.ultimo:
self.primero = self.ultimo = None
else:
self.ultimo = self.ultimo.anterior
self.__unir_nodos()
def __unir_nodos(self):
if self.primero != None:
self.primero.anterior = self.ultimo
self.ultimo.siguiente = self.primero
def recorrer_inicio_a_fin(self):
aux = self.primero
while aux:
print(aux.dato)
aux = aux.siguiente
if aux == self.primero:
break
def recorrer_fin_a_inicio(self):
aux = self.ultimo
while aux:
print(aux.dato)
aux = aux.anterior
if aux == self.ultimo:
break | [
"[email protected]"
] | |
0755247d769e1e57dfea2d61e1b42be00cdbae6d | d2f3c079437c905b9496240ff5742199f539b742 | /pusher/migrations/0005_notificationdata_stress.py | d1f2145261e4987f577f416e7eeea9e33156ea1e | [] | no_license | mattiasedin/noticeTester | aa9b564aa1b86c985f543c22af8bb5698e138e6f | dc128ac9c04266c784fa8776b4a54d36117a634b | refs/heads/master | 2021-01-21T13:43:56.892798 | 2016-04-27T13:29:08 | 2016-04-27T13:29:08 | 52,272,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-13 12:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pusher', '0004_participant_registered'),
]
operations = [
migrations.AddField(
model_name='notificationdata',
name='stress',
field=models.IntegerField(choices=[(-1, 'choose one'), (1, 'not at all'), (2, 'just a little'), (3, 'to some extent'), (4, 'rather much'), (5, 'very much')], default=-1, max_length=1),
),
]
| [
"[email protected]"
] | |
3b291b7d6a83ea992612d3f1eb94d21dafb8b20a | 16724f3efa1702fc0fd2b5c261e2dc50485a526e | /config_generator/port_reconfig.py | a3f6a2497e63c96c170c24ca8ada7176f38d66b1 | [] | no_license | jazheng1/switch-config-tool | f26a68e3d7a04b9c950b9fc7cc7f9bfc40720e0c | b93989402c144af65aa8ee0878dec7cc713df7c0 | refs/heads/main | 2023-06-02T08:07:28.571241 | 2021-06-23T14:40:04 | 2021-06-23T14:40:04 | 379,633,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | # This program is designed to modify an existing switch port using designated templates of config.
# It first asks for a switch name to connect to, then a port you want to modify, along with user/pass for the switch.
# Then it displays the current config of the port and asks what template you want to use to modify it.
import paramiko
# Ask for the switch name and set it as the hostname variable.
hostname = input("Enter the switch name: ")
# Ask for the interface name and set it as the interface variable.
interface = input("Enter the interface name: ")
# Ask for the username name and set it as the username variable.
username = input("Enter the username name: ")
# Ask for the password name and set it as the password variable.
password = input("Enter the password name: ")
#Connect to the switch and gather the config
ssh = paramiko.SSHClient()
ssh.connect(hostname,
| [
"[email protected]"
] | |
4a409d614205f0a4297ec6b4f413a11636620859 | 128776be8c3404badebde79555208cb64919d0fb | /apps/08 - File Searcher/file_search.py | 4af1213a85077aae0790daf71dab2195051d09fd | [
"MIT"
] | permissive | Atropos148/Jumpstart10apps | 68a5277b3c2e72c7a38dad7cf03069b4f43a0c66 | 4b288a6f2a592b1bfb7fe6169ea566e1f236d886 | refs/heads/master | 2021-09-22T23:06:57.734101 | 2021-09-11T15:04:12 | 2021-09-11T15:04:12 | 132,234,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | import os
import collections
SearchResult = collections.namedtuple('SearchResult',
'file, line, text')
def main():
write_header()
folder = get_folder_from_user()
if not folder:
print("Sorry, we can't search that location.")
return
text = get_search_text_from_user()
if not text:
print("Sorry, we can't search for nothing.")
return
matches = search_folders(folder, text)
match_count = 0
for m in matches:
match_count += 1
# print(m)
# print('----- Match -----')
# print('file: ' + m.file)
# print(f'line: {m.line}')
# print('match: ' + m.text.strip())
# print()
print(f'Found {match_count:,} matches')
def write_header():
print('--------------------------------')
print(' FILE SEARCH APP')
print('--------------------------------')
def get_folder_from_user():
folder = input('What folder do you want to search? ')
if not folder or not folder.strip():
return None
if not os.path.isdir(folder):
return None
return os.path.abspath(folder)
def get_search_text_from_user():
text = input('What are you searching for [single phrases only]? ')
return text.lower()
def search_folders(folder, text):
# print(f'Would search {folder} for {text}')
items = os.listdir(folder)
for item in items:
full_item = os.path.join(folder, item)
if os.path.isdir(full_item):
yield from search_folders(full_item, text)
else:
yield from search_file(full_item, text)
def search_file(filename, search_text):
with open(filename, 'r', encoding='utf-8') as fin:
line_num = 0
for line in fin:
line_num += 1
if line.lower().find(search_text) >= 0:
m = SearchResult(line=line_num, file=filename, text=line)
# matches.append(m)
yield m
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
104cc2492e0a10168f19360ad9fd857a9423af02 | 7f8483b792bd2dfa732a13c26590833f5902f083 | /Aula11/face_cascade.py | caf29369a8fde7f1ca1b188f6a512db8a71ec1ec | [
"MIT"
] | permissive | thiagopollachini/introducao-opencv | c905c61a65a966d50df2838ba8463a7e0725a765 | c1b63bb2aca008821489f65479c957ce4f925c80 | refs/heads/master | 2021-09-13T23:50:20.365076 | 2018-05-06T03:19:51 | 2018-05-06T03:19:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import numpy as np
import cv2
face_cascade = cv2.CascadeClassifier('../xml/haarcascade_frontalface_default.xml')
img = cv2.imread('../images/messi.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
mascara = cv2.imread('../images/mascara.jpg')
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
df4bd03b7db36d8c41bc18a14ff237e55771ca14 | d609e6521735df2f62463e6aaa0b07d9fab16e18 | /ttsx/ttsx/settings.py | 15e225b9829fcfbd0b8cc6105c4967b0a761c501 | [] | no_license | pythonxiaohei/- | 4d2b698c15c24e104dca25879b8d54b0f1f42635 | d29719b40c3966be7815a07e684ed052ed448a5a | refs/heads/master | 2020-04-05T15:36:24.847125 | 2018-11-10T14:12:07 | 2018-11-10T14:12:07 | 156,976,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,082 | py | """
Django settings for ttsx project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9-r7w(#cg5nh^$lfxvqc5=5=(e0c#(8b486#)y@u=smm(5k3%y'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ttsx.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ttsx.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
7bf1e0047a530d9dd1719be95fd901b4e3c3bb99 | f7d47249f7e74bec51eacaa05f381674b92e3611 | /interview/属性property.py | 3c2afe43812b4bef2d161d13462e5c02ba739369 | [] | no_license | jinlijiang123/crawler | f96764bc5e7ae6f254e397189c4228336889a0d1 | cd3f16d04cc7c83b78d5a78afa7a57951399d490 | refs/heads/master | 2020-06-15T19:44:48.892953 | 2019-03-26T04:35:29 | 2019-03-26T04:35:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | # -*- coding: utf-8 -*-
不懂!!!
一.私有属性添加get和set方法 __XX
class Money(object):
def __init__(self):
self.__money = 0
def getMoney(self):
return self.__money
def setMoney(self, value):
if isinstance(value, int):
self.__money = value
else:
print("error:不是整型数字")
二.使用property升级get和set方法
get_set.py
class Money(object):
def __init__(self):
self.__money = 0
def getMoney(self):
return self.__money
def setMoney(self, value):
if isinstance(value, int):
self.__money = value
else:
print("error:不是整型数字")
money = property(getMoney,setMoney)
#运行
from get_set import Money
a = Money()
a.money #0
a.money = 100 # 100
a.getMoney() #100
| [
"[email protected]"
] | |
12c911a541ec8c9cb6c05676b1a960acae2f2fee | 7ab19008d25b4b24e851f61f898cf3c127f354b9 | /Exercise/Wheel of Fortune Analysis/wof_analysis.py | 203da4a83094ee22757151cb2cbb3e48c5c775e5 | [] | no_license | doyoonkim3312/PythonPractice | 588d3dfa37cd704b05f1a704076f8dfc1c55b7de | dd9cb3db39610d709d9082e59fe0a1598e1b4c51 | refs/heads/master | 2023-04-25T04:08:06.706533 | 2021-05-13T13:37:12 | 2021-05-13T13:37:12 | 334,782,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | ###############################################################################
# Author: Doyoon Kim ([email protected] / [email protected])
# Date: Mar 10, 2021
# Description This program counts total number of alphabet usage in phrases.txt
# file.
###############################################################################
import matplotlib.pyplot as plt
from string import ascii_letters
def read_file():
alphabetList: list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S'
, 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
result: dict = {}
counter = 0
totalLetter = 0
file = open('phrases.txt', 'r')
phrases = file.read()
file.close()
for letter in alphabetList:
for line in phrases.split("\n"):
for char in line:
if char in ascii_letters:
totalLetter += 1
if char.upper() == letter:
counter += 1
else:
continue
result[letter] = counter
counter = 0
return result, totalLetter
def main():
a, totalLetter = read_file()
keys: list = a.keys()
values: list = a.values()
valuesModified: list = list()
for value in values:
valuesModified.append(value / totalLetter)
fig, chart = plt.subplots()
chart.set_title("Letter Frequency in Puzzle Phrases")
chart.set_xlabel("Letter")
chart.set_ylabel("Letter Appearance Frequency")
chart.bar(keys, valuesModified)
plt.savefig("wof_analysis.pdf")
if __name__ == '__main__':
main()
plt.show()
| [
"[email protected]"
] | |
e75d6e09e3769b7572600c10891972aa35c71a29 | 3f51d8812a731fddefe80233e209ad19f1407531 | /django/form/articles/urls.py | b92978783195392394560844eb3b3b0a07008ca6 | [] | no_license | mizm/TIL-c9 | 281336cccc74809dfd5e4aa6cff0114a37059155 | 7b4c561bcbc32533852cb025a4823487caf981d6 | refs/heads/master | 2020-04-17T17:17:35.861307 | 2019-05-09T09:00:12 | 2019-05-09T09:00:12 | 166,776,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from django.urls import path
from . import views
app_name ='articles'
urlpatterns = [
path('new/',views.create, name ='create'),
path('<int:article_id>/', views.detail, name='detail'),
path('<int:article_id>/edit/', views.update , name='update'),
] | [
"[email protected]"
] | |
38bdc3ea6ca947e1c4f96e747419a4f868b1ce18 | 08675cdb44a89ae3b0794754213d26a5e8165158 | /Week 1/test.py | 6d1211a9b56fa1b48fe7967a104d3eff7aa7ef8b | [] | no_license | thunderbird781/raspberry1 | b8e65eb57cd9938f4f5db20bbe181c34eb74da73 | b8910968f5215e936ca93a0a5d007411e4b86201 | refs/heads/master | 2021-01-18T11:57:04.547305 | 2017-03-09T00:16:20 | 2017-03-09T00:16:20 | 84,328,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | print ("hello code hive") #test program
| [
"[email protected]"
] | |
13f0520cb06c1d5b01c72e414a8177cbf260b97e | e367447d1c31dcb73f9c70f5b9aefd2973d7c0f5 | /app/api/views/authentication/auth_view.py | e9efc7e7083756237eca03c08fca24f0fe645c74 | [] | no_license | Georgeygigz/the-habitat | 4a2ffc2314cb918b6e8cee724f6f0b2677d4de5a | b3ecdbe494d380d01e74ba25f6a669886e28a340 | refs/heads/develop | 2020-07-11T01:38:36.892115 | 2019-12-06T09:21:52 | 2019-12-06T09:21:52 | 204,420,182 | 0 | 1 | null | 2019-12-06T09:21:54 | 2019-08-26T07:26:25 | Python | UTF-8 | Python | false | false | 4,088 | py | # app/api/v1/views/auth_views.py
"""This is where all authentication Endpoints will be captured."""
import re
from flask_jwt_extended import (create_access_token, jwt_required,get_raw_jwt)
from flask import request, jsonify, make_response
import datetime
from functools import wraps
from passlib.hash import sha256_crypt
from flask_restful import Resource, reqparse
from app.api.models.auth_modles import User;
from app.api.schemas.auth_shema import UserSchema
# import class products
# from app.api.v2.models.store_model import Users
from app.api.utils.authorization import admin_required
blacklist = set()
class CreateAccount(Resource):
"""Create a new account."""
def post(self):
"""Create an account for new user."""
users = User.query.all()
data = request.get_json(force=True)
user_id = 2
username = data["username"]
email = data["email"]
password = data["password"]
user_type = data["user_type"]
current_user = [user for user in users if user.email == email]
if current_user:
return make_response(jsonify({"message": "{} Already Exist".format(current_user[0].email)}), 409)#Bad request
if not re.match(
r'^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$',
request.json['email']):
return make_response(jsonify({"message": "invalid Email"}), 400)#Bad request
if not re.match(
'(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[@#$])',
request.json['password']):
return make_response(jsonify({"message": "invalid password"}), 400)#Bad request
new_user_detail = {"user_id": len(users)+1,
"username": username,
"email": email,
"password": sha256_crypt.hash(password),
"user_type": user_type}
schema = UserSchema()
data1 = schema.load_object_into_schema(new_user_detail)
new_data = User(**data1)
new_data.save()
return make_response(
jsonify({"message": "Account created successfuly"}), 201)#created
return make_response(jsonify(
{"message": " {} Aready Exist".format(request.json['email'])}), 409) # conflict
class Login(Resource):
"""Login Endpoint."""
def post(self):
data = request.get_json(force=True)
email = data['email']
get_password = data['password']
cur_user = User.query.filter(User.email==email).first()
if cur_user:
password = cur_user.password
if sha256_crypt.verify(get_password, password):
token = create_access_token(identity=cur_user.email)
result = {"message": "Login succesful", "token": token}
else:
return make_response(
jsonify({"message": "Incorrect Password"}), 401)#unauthorized
else:
return make_response(
jsonify({"message": "Incorrect Email. If have not account, contact Admin"}), 401)#unauthorized
return result, 200 #ok
class UpdateUserRole(Resource):
@jwt_required
@admin_required
def put(self, user_id):
"""Update user role."""
users = Users().get_all_users()
data = request.get_json(force=True)
role = (data["role"]).lower()
current_user = User.query.filter(User.user_id==user_id).first()
# update_user = [user for user in users if user['user_id'] == user_id]
if not current_user:
return make_response(jsonify({'Error': "User Not found"}), 400) #Bad request
user = Users()
user.update_user(user_id, role)
return make_response(jsonify(
{'Message': "{} Updated Successfuly".format(current_user.username)}), 200) #ok
class Logout(Resource):
@jwt_required
def delete(self):
jti = get_raw_jwt()['jti']
blacklist.add(jti)
return make_response(jsonify({"message": "Successfully logged out"}), 200)#ok
| [
"[email protected]"
] | |
7e3d14006f561e8a72914984adf13b12dfab3a88 | b48b47fcdbf46c43633be8ffaa9ad271892ebb07 | /Explanations/11-IntroToOOP__PT2/base_class_example.py | 2fafa19dcce01e304bc7fbacd84cb4e53b6fc21e | [] | no_license | DavinderSohal/Python | 92e878c2648c9507b00fef6ee7d7ae67338219d9 | 728e77c1e55aaa00a642d76dce5d6cdf56eff788 | refs/heads/main | 2023-05-12T23:41:46.226139 | 2021-05-16T16:12:01 | 2021-05-16T16:12:01 | 340,810,583 | 0 | 0 | null | 2021-04-20T09:18:00 | 2021-02-21T03:42:21 | Python | UTF-8 | Python | false | false | 237 | py |
class A:
def __init__ (self):
print("Initializing A")
class B(A): # inherits from A
def init(self):
A.__init__ (self) #Call constructor of base class
print("Initializing B")
b = B()
#b.init()
| [
"[email protected]"
] | |
04fdf8031b7fae2dec082a66f5e3b3acb7c4362b | 0269abd1e5f9a9ddc4ff506e63930174829dfe92 | /tutnese/tutnese.py | 5c026fdeb552462d8d5ad24fba61223ed6b8582f | [] | no_license | DrkSephy/I4330 | d1abf802a07739300a7ef558cb18a9e9e67210a3 | 06dc860529db1dabe01044412f91b5fb162f9756 | refs/heads/master | 2016-09-06T18:23:53.690922 | 2015-02-27T17:25:41 | 2015-02-27T17:25:41 | 31,239,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,996 | py | ############################
# David Leonard #
# tutnese.py #
# *~-drksephy.github.io*~- #
############################
import re
# Decoding table: (regex, replacement) pairs tried in order at each input
# position.  The 'squaX' entries must come first so doubled consonants are
# matched before the single-consonant syllables.
token_exprs = [
    (r'squab', 'bb'),
    (r'squac', 'cc'),
    (r'squad', 'dd'),
    (r'squaf', 'ff'),
    (r'squag', 'gg'),
    (r'squah', 'hh'),
    (r'squaj', 'jj'),
    (r'squak', 'kk'),
    (r'squal', 'll'),
    (r'squam', 'mm'),
    (r'squan', 'nn'),
    (r'squap', 'pp'),
    (r'squaq', 'qq'),
    (r'squar', 'rr'),
    (r'squas', 'ss'),
    (r'squat', 'tt'),
    (r'squav', 'vv'),
    (r'squaw', 'ww'),
    (r'squax', 'xx'),
    (r'squay', 'yy'),
    (r'squaz', 'zz'),
    (r'bub' , 'b'),
    (r'coch' , 'c'),
    (r'dud' , 'd'),
    (r'fuf' , 'f'),
    (r'gug' , 'g'),
    (r'hash' , 'h'),
    (r'jug' , 'j'),
    (r'kuck' , 'k'),
    (r'lul' , 'l'),
    (r'mum' , 'm'),
    (r'nun' , 'n'),
    (r'pup' , 'p'),
    (r'quack', 'q'),
    (r'rur' , 'r'),
    (r'sus' , 's'),
    (r'tut' , 't'),
    (r'vuv' , 'v'),
    (r'wack' , 'w'),
    (r'xux' , 'x'),
    (r'yub' , 'y'),
    (r'zug' , 'z'),
    (r'a' , 'a'),
    (r'o' , 'o'),
    (r'e' , 'e'),
    (r'i' , 'i'),
    (r'u' , 'u'),
    (r' ' , ' '),
    (r',' , ','),
    (r'!' , '!'),
]
# Encoding table: single consonant -> Tutnese syllable (vowels pass through).
language = {
    'b': 'bub',
    'c': 'coch',
    'd': 'dud',
    'f': 'fuf',
    'g': 'gug',
    'h': 'hash',
    'j': 'jug',
    'k': 'kuck',
    'l': 'lul',
    'm': 'mum',
    'n': 'nun',
    'p': 'pup',
    'q': 'quack',
    'r': 'rur',
    's': 'sus',
    't': 'tut',
    'v': 'vuv',
    'w': 'wack',
    'x': 'xux',
    'y': 'yub',
    'z': 'zug'
}
# Doubled-consonant syllables ("squa" + consonant).  NOTE(review): this dict
# is not referenced by encode/decode below; the same mapping lives in
# token_exprs -- presumably kept for documentation. TODO confirm.
doubleLanguage = {
    'squab': 'bb',
    'squac': 'cc',
    'squad': 'dd',
    'squaf': 'ff',
    'squag': 'gg',
    'squah': 'hh',
    'squaj': 'jj',
    'squak': 'kk',
    'squal': 'll',
    'squam': 'mm',
    'squan': 'nn',
    'squap': 'pp',
    'squaq': 'qq',
    'squar': 'rr',
    'squas': 'ss',
    'squat': 'tt',
    'squav': 'vv',
    'squaw': 'ww',
    'squax': 'xx',
    'squay': 'yy',
    'squaz': 'zz'
}
# Vowels are copied verbatim and never get the doubled-letter treatment.
vowels = ['a', 'o', 'e', 'i', 'u']
def encode(phrase):
    """Encode an English *phrase* into Tutnese.

    The phrase is lower-cased and split on whitespace.  Within each word,
    consonants become their syllable from the module-level ``language``
    table, vowels pass through, and a doubled consonant pair becomes
    ``'squa'`` followed by the bare letter.

    Raises:
        Exception: if the character ``'|'`` appears in the input.
    """
    encoded_words = []
    for word in phrase.lower().split():
        pieces = []
        for idx, ch in enumerate(word):
            if ch == '|':
                raise Exception('Illegal character: |')
            # Neighbouring characters ('' at either end of the word).
            nxt = word[idx + 1] if idx < len(word) - 1 else ''
            prv = word[idx - 1] if idx > 0 else ''
            if ch == nxt and ch not in vowels:
                # First of a doubled consonant pair: emit the marker only.
                pieces.append('squa')
            elif ch == prv:
                # Second of a doubled pair: emit the raw letter.
                pieces.append(ch)
            else:
                # Consonant -> syllable; anything else passes through.
                pieces.append(language.get(ch, ch))
        encoded_words.append(''.join(pieces))
    return ' '.join(encoded_words)
def decode(phrase):
    """Decode a Tutnese *phrase* back into plain text by tokenizing it and
    concatenating the tokens' replacements."""
    return ''.join(tutneseLex(phrase))
def tutneseLexer(characters, token_exprs):
    """Tokenize *characters* using the (pattern, tag) pairs in *token_exprs*.

    At each position the patterns are tried in order and the first match
    wins; its tag is appended to the result (falsy tags are skipped).
    On an unrecognised character an error is written to stderr and the
    process exits with status 1.

    Returns:
        list: the tags of the matched tokens, in input order.
    """
    # BUGFIX: the original referenced `sys` on the error path without the
    # module ever being imported, which raised NameError instead of exiting.
    import sys

    pos = 0
    tokens = []
    while pos < len(characters):
        match = None
        for pattern, tag in token_exprs:
            regex = re.compile(pattern)
            match = regex.match(characters, pos)
            if match:
                if tag:
                    tokens.append(tag)
                break
        if not match:
            sys.stderr.write('Illegal character: %s\n' % characters[pos])
            sys.exit(1)
        else:
            # Continue scanning after the matched text.
            pos = match.end(0)
    return tokens
# Create our lexer function
def tutneseLex(characters):
    # Convenience wrapper: tokenize with the module-level Tutnese rules.
    return tutneseLexer(characters, token_exprs)
# Demo round-trip.  BUGFIX: parenthesised print calls behave identically on
# Python 2 (single argument) and are required syntax on Python 3; the
# originals were Python-2-only print statements.
print(encode('Over hill, over dale, Thorough bush, thorough brier, Over park, over pale, Thorough flood, thorough fire!'))
print(decode('ovuverur hashisqual, ovuverur dudalule, tuthashorurougughash bubusushash, tuthashorurougughash bubrurierur, ovuverur puparurkuck, ovuverur pupalule, tuthashorurougughash fufluloodud, tuthashorurougughash fufirure!'))
| [
"[email protected]"
] | |
df11bc1e05545abc15b0e5c0f2bcbf74a4cc842e | f5e909ad8658920c5bd7e90ab712c8658b32ce52 | /BoxTest.py | 183a6e567d0ad8807a32a64c82fcc6646779406b | [] | no_license | pmartel/Sudoku | 4ab4b132034427faadca18eb2cba626709bfacac | d1ff436db6c44ecf5ab12a7fc7f0801b903de887 | refs/heads/master | 2021-01-06T20:46:48.983784 | 2018-10-08T18:20:31 | 2018-10-08T18:20:31 | 26,099,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | """ test how to get row.col from box"""
# For each 3x3 Sudoku box index b (0..8), print the row/col of its
# top-left cell.
for b in range(9):
    print('b',b)
    print('row',(b//3)*3)
    print('col',(b*3)%9)  # (b*3)%9 equals (b%3)*3: the box's leftmost column
| [
"[email protected]"
] | |
122971286c85e126dcc8e82ca753e18a2e855065 | 9ddfd30620c39fb73ac57e79eae0a001c45db45f | /addons/activity_crm/models/activity.py | eed0da88d3f782f237d7f7aadb7cb8904596e0d0 | [] | no_license | zamzamintl/silver | a89bacc1ba6a7a59de1a92e3f7c149df0468e185 | 8628e4419c4ee77928c04c1591311707acd2465e | refs/heads/master | 2023-01-06T20:29:25.372314 | 2020-10-29T21:02:41 | 2020-10-29T21:02:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | import logging
from odoo import fields, http, tools, _
from odoo.http import request
_logger = logging.getLogger(__name__)
from odoo import api, fields ,models
from odoo.exceptions import ValidationError
from odoo.http import request
class activity(models.Model):
    """Extension of helpdesk.ticket: when a ticket is linked to a CRM lead,
    log a 'create by Ticket' activity on that lead."""
    _inherit='helpdesk.ticket'
    # NOTE(review): declared with @api.constrains although it creates a
    # record rather than validating one -- an onchange/compute may have
    # been intended; confirm before changing.
    @api.constrains("lead_id")
    def get_activity(self):
        # NOTE(review): leftover debug logging.
        _logger.info("RRRRRRRRRRRRRRRRRRRR")
        # Look up the crm.lead model row and the activity type by name;
        # assumes a 'create by Ticket' mail.activity.type exists -- TODO confirm.
        model=self.env['ir.model'].search([('model','=','crm.lead')])
        act=self.env['mail.activity.type'].search([('name','=','create by Ticket')])
        if self.lead_id:
            self.lead_id.activity_ids.create({'res_id':self.lead_id.id,'activity_type_id':act.id,'res_model_id':model.id})
"[email protected]"
] | |
858a0f511aa87c57bf19b04939a8ccae247e053d | fbbaf67d3a8992eb3409416519773f2efb798277 | /notebooks/training/Training_sample_PJ.py | 3c8d1257753e7f05ff278e93e4900716d8752c6d | [] | no_license | miquelescobar/asmr-is-all-you-need | 52eb9e166fd4437abd48c6b78b48f51f9ad472a1 | 1d3e441320f7050edb60a83793e17b4aaf8462c7 | refs/heads/master | 2023-08-17T05:09:19.118356 | 2021-09-13T14:38:49 | 2021-09-13T14:38:49 | 406,009,107 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,242 | py | #!/usr/bin/env python
# coding: utf-8
# # WaveRNN - Fit a Sample
# In[1]:
import os
os.chdir('/veu4/usuaris26/footanalytics/segmentation/asmr-is-all-you-need/notebooks/training')
print(os.getcwd())
import sys
sys.path.append('../../')
sys.path.append('../../network')
# In[2]:
import time, sys, math
import numpy as np
import torch
from torch import optim
import torch.nn as nn
import torch.nn.functional as F
from scipy.io import wavfile
from librosa import resample
from src.display import *
from src.dsp import *
from models.wavernn import WaveRNN
# In[3]:
import matplotlib.pyplot as plt
import time, sys, math
import numpy as np
def plot(array) :
fig = plt.figure(figsize=(30, 5))
ax = fig.add_subplot(111)
ax.xaxis.label.set_color('grey')
ax.yaxis.label.set_color('grey')
ax.xaxis.label.set_fontsize(23)
ax.yaxis.label.set_fontsize(23)
ax.tick_params(axis='x', colors='grey', labelsize=23)
ax.tick_params(axis='y', colors='grey', labelsize=23)
plt.plot(array)
# In[4]:
notebook_name = 'glass_sample_PJ_v1'
sample_rate = 22050
epochs = 10000
# In[5]:
sr, sample = wavfile.read('../../data/audio/prova_glass.wav')
# In[6]:
#Stereo to mono
sample = sample.sum(axis=1) / 2
# In[7]:
#downsample from original freq to sample_rate
sample = resample(sample, orig_sr = sr, target_sr = sample_rate)
# In[8]:
sample = sample.astype(np.int16)
# In[9]:
print(sample.max(), sample.min())
# ### Split/Combine Functions
# In[22]:
def split_signal(x) :
unsigned = x + 2**15
coarse = unsigned // 256
fine = unsigned % 256
return coarse, fine
def split_signal_PJ(x) :
minim = x.min()
maxim = x.max()
unsigned = ((x-minim) / (maxim-minim)) * 2**16
unsigned = unsigned.astype(np.int16)
coarse = unsigned // 256
fine = unsigned % 256
return coarse, fine
# In[23]:
def combine_signal(coarse, fine) :
return coarse * 256 + fine - 2**15
# In[24]:
plot(sample[73000:73100])
# In[25]:
coarse_classes, fine_classes = split_signal_PJ(sample)
# In[26]:
plot(coarse_classes[73000:73100])
# In[27]:
plot(fine_classes[73000:73100])
# ### Train Model
# In[28]:
model = WaveRNN().cuda()
# In[29]:
coarse_classes, fine_classes = split_signal(sample)
# In[30]:
batch_size = 128 # 8gb gpu
coarse_classes = coarse_classes[:len(coarse_classes) // batch_size * batch_size]
fine_classes = fine_classes[:len(fine_classes) // batch_size * batch_size]
coarse_classes = np.reshape(coarse_classes, (batch_size, -1))
fine_classes = np.reshape(fine_classes, (batch_size, -1))
# In[31]:
coarse_classes.shape
# In[44]:
total_losses = []
coarse_losses = []
fine_losses = []
# In[50]:
def train(model, optimizer, num_steps, batch_size, lr=1e-3, seq_len=960) :
for p in optimizer.param_groups : p['lr'] = lr
start = time.time()
running_loss = 0
running_c_loss = 0
running_f_loss = 0
for step in range(num_steps) :
loss = 0
sum_loss_coarse = 0
sum_loss_fine = 0
hidden = model.init_hidden(batch_size)
optimizer.zero_grad()
rand_idx = np.random.randint(0, coarse_classes.shape[1] - seq_len - 1)
x_coarse = coarse_classes[:, rand_idx:rand_idx + seq_len]
x_coarse = torch.FloatTensor(x_coarse)
x_coarse = x_coarse / 127.5 - 1.
x_fine = fine_classes[:, rand_idx:rand_idx + seq_len]
x_fine = torch.FloatTensor(x_fine)
x_fine = x_fine / 127.5 - 1.
y_coarse = coarse_classes[:, rand_idx + 1:rand_idx + seq_len + 1]
y_coarse = torch.LongTensor(y_coarse)
y_fine = fine_classes[:, rand_idx + 1: rand_idx + seq_len + 1]
y_fine = torch.LongTensor(y_fine)
for i in range(seq_len) :
x_c_in = x_coarse[:, i:i + 1]
x_f_in = x_fine[:, i:i + 1]
x_input = torch.cat([x_c_in, x_f_in], dim=1)
x_input = x_input.cuda()
c_target = y_coarse[:, i].cuda()
f_target = y_fine[:, i].cuda()
current_coarse = c_target.float() / 127.5 - 1.
current_coarse = current_coarse.unsqueeze(-1)
out_coarse, out_fine, hidden = model(x_input, hidden, current_coarse)
loss_coarse = F.cross_entropy(out_coarse, c_target)
loss_fine = F.cross_entropy(out_fine, f_target)
loss += (loss_coarse + loss_fine)
sum_loss_coarse += loss_coarse
sum_loss_fine += loss_fine
running_loss += (loss.item() / seq_len)
running_c_loss += (sum_loss_coarse.item() / seq_len)
running_f_loss += (sum_loss_fine.item() / seq_len)
loss.backward()
optimizer.step()
total_losses.append(running_loss / (step + 1))
coarse_losses.append(running_c_loss / (step + 1))
fine_losses.append(running_f_loss / (step + 1))
elapsed = time_since(start)
speed = (step + 1) / (time.time() - start)
stream('Step: %i/%i --- Loss: %.3f --- %s --- @ %.2f batches/sec ',
(step + 1, num_steps, running_loss / (step + 1), elapsed, speed))
# In[51]:
optimizer = optim.Adam(model.parameters())
# In[52]:
train(model, optimizer, num_steps=epochs, batch_size=batch_size, lr=1e-3)
# In[53]:
torch.save(model.state_dict(), f'../../network/weights/wavernn/model_{notebook_name}.pt')
# In[54]:
plt.plot(total_losses)
plt.show()
plt.plot(coarse_losses)
plt.show()
plt.plot(fine_losses)
plt.show()
# In[55]:
def save_wav(y, filename, sample_rate) :
y = np.clip(y, -2**15, 2**15 - 1)
wavfile.write(filename, sample_rate, y.astype(np.int16))
# In[56]:
num_samples = sample_rate * 15
output, c, f = model.generate(num_samples)
our_output = combine_signal(c, f)
# In[57]:
versio = 1
material = "glass"
output_path = f'gen_{notebook_name}_{material}_{versio}.wav'
save_wav(output, output_path, sample_rate)
# In[ ]:
versio = 1
material = "our_glass"
output_path = f'../../network/outputs/gen_{notebook_name}_{material}_{versio}.wav'
save_wav(our_output, output_path, sample_rate)
| [
"[email protected]"
] | |
5420182c1b959f1d61944b6198a9cce2a52b0982 | f64ea492e05ab27448a08738b7e5a18b3b322e9c | /tortugaFree.py | 8e491666bd2afe93f275837081afe992274b9205 | [] | no_license | anbreaker/carreraPyGame | aa73e5dd8306cecfb87df987441cecd4228c1380 | 26294d23b1f69acbe99a8bfe5528ad2de866f07b | refs/heads/master | 2020-07-19T05:55:41.492554 | 2019-09-05T14:04:11 | 2019-09-05T14:04:11 | 206,387,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,595 | py | import pygame
from pygame.locals import *
import sys
import random
class Runner():
    """A racing sprite that advances by a random amount each tick."""
    # tuple of sprite names a runner can be drawn with
    __custome =('turtle', 'fish', 'prawn', 'moray', 'octopus')
    def __init__(self, x = 0, y = 0, custome = None):
        # Pick a random sprite image for this runner.  NOTE(review): the
        # `custome` argument only sets the display name; the image is
        # always chosen at random.
        ixCustome = random.randint(0,4)
        self.custome = pygame.image.load("images/{}.png".format(self.__custome[ixCustome]))
        self.position = [x,y]
        self.name = custome
    def avanzar(self):
        # Advance 1-3 pixels to the right.
        self.position[0] += random.randint(1,3)
class Game():
    """Top-level game object: owns the pygame window, the background
    image and a single Runner, and runs the event/render loop."""

    def __init__(self, width, height):
        # BUGFIX: the window size was hard-coded to 640x480, silently
        # ignoring the width/height arguments.
        self.__screen = pygame.display.set_mode((width, height))
        # Background image for the display.
        self.background = pygame.image.load("images/background.png")
        # Window title.
        self.__title = "Carrera de bichos"
        self.runner = Runner(320, 240)

    def start(self):
        """Main loop: handle quit and arrow-key events, redraw each frame."""
        gameOver = False
        while not gameOver:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                # BUGFIX: was `event == KEYDOWN` (never true, so keys were
                # ignored) and `event.key()` (key is an attribute, not a
                # callable); `pritn` was a typo that raised NameError.
                elif event.type == KEYDOWN:
                    if event.key == K_UP:
                        # Move up.
                        self.runner.position[1] -= 5
                        print('le estoy dando ARRIBA')
                    elif event.key == K_DOWN:
                        # Move down.
                        self.runner.position[1] += 5
                        print('le estoy dando ABAJO')
                    elif event.key == K_LEFT:
                        # Move left.
                        self.runner.position[0] -= 5
                        print('le estoy dando IZQUIERDA')
                    elif event.key == K_RIGHT:
                        # Move right.
                        self.runner.position[0] += 5
                        print('le estoy dando DERECHA')
                else:
                    pass
            # Render: background first, then the runner sprite on top.
            self.__screen.blit(self.background, (0, 0))
            self.__screen.blit(self.runner.custome, self.runner.position)
            # Swap buffers.
            pygame.display.flip()
if __name__ == '__main__':
    # Initialise pygame's font module, build the window and run the loop.
    pygame.font.init()
    game = Game(640, 480)
    game.start()
"[email protected]"
] | |
7c781cf14b7409060a0c856ddaa0b58260d11028 | 2618a298c764bd364ef86fc077ceaa4fd1c64a60 | /multi_maze/agent.py | 9a4a42e132d7b233d6c77a207900e19096fa8a99 | [] | no_license | sculyi/Deep-Learning | 03017bc33f53a72490ee0ceb58e7bdd0db6e34c5 | 0ef7daa8f9ff311b82bef79e1eed50ed3e4a1442 | refs/heads/master | 2021-01-15T17:50:17.482112 | 2017-12-02T04:14:53 | 2017-12-02T04:14:53 | 99,761,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,077 | py | import numpy as np
class MovingAgent():
    """A grid agent that walks toward a destination on a canvas.

    Positions are ``[x, y]`` lists.  Headings are eight directions 0..7
    (see ``head_type``) plus 8 for "no displacement".  The agent keeps a
    per-step history of positions in ``all_pos`` and derives its current
    heading from the most recent actual move.
    """

    def __init__(self, init_pos=None, dst_pos=None, init_head=0, width=50, height=50):
        # BUGFIX: the defaults used to be mutable lists ([0, 0]) shared by
        # every instance created without arguments; since cur_pos is
        # mutated in place, all such agents moved together.  Fresh lists
        # are built per instance now.  Explicitly passed lists are still
        # aliased, preserving the original behaviour for callers.
        self.last_pos = []  # previous position (empty until the first step)
        self.cur_pos = init_pos if init_pos is not None else [0, 0]  # start position
        # Initial heading, used until a move is recorded in the history.
        self.init_head = init_head
        self.dst_pos = dst_pos if dst_pos is not None else [0, 0]  # destination position
        self.tk_id = -1  # object id in the tk canvas (-1 = not drawn yet)
        # History of {"l": last, "c": current} pairs, one entry per step.
        # NOTE: every "c" references self.cur_pos, which is mutated in
        # place, so historic "c" entries track the *current* position;
        # step() relies on this aliasing, so it is deliberately kept.
        self.all_pos = [{"l": self.last_pos, "c": self.cur_pos}]
        self.last_action = 0  # action taken on the previous step
        # canvas bounds (inclusive, per the bounds check in step())
        self.width = width
        self.height = height

    def setTkId(self, tk_id, name):
        """Record the canvas object id and display name of this agent."""
        self.tk_id = tk_id
        self.name = name

    def isDst(self, pos_):
        """Return True when pos_ equals the destination position."""
        return pos_ == self.dst_pos

    # calc the moving direction according to dx and dy
    def head_type(self, dx, dy):
        """Map a displacement (dx, dy) to a heading 0..7; 8 means no move.

        Presumably screen coordinates (y grows downward), so heading 0 is
        dy < 0 -- TODO confirm against the canvas code.
        """
        # |/_\|(0,1,2,3,4) /_\(5,6,7)
        if dx == 0:
            if dy < 0: return 0
            elif dy > 0: return 4
            elif dy == 0: return 8
        elif dx > 0:
            if dy < 0: return 1
            elif dy == 0: return 2
            elif dy > 0: return 3
        elif dx < 0:
            if dy == 0: return 6
            elif dy > 0: return 5
            elif dy < 0: return 7

    # got dx and dy of next step by the head and action
    def move(self, head_type, action):
        """Return the (dx, dy) for taking *action* while facing *head_type*.

        Caller actions 1..3 are shifted to 0..2 here (action 0, "stand
        still", never reaches this method).  Unknown headings (e.g. 8)
        yield no movement.
        """
        action -= 1  # shift caller actions {1,2,3} to table indices {0,1,2}
        if head_type == 0:
            if action == 0: return 0, -1
            elif action == 1: return -1, -1
            elif action == 2: return 1, -1
        elif head_type == 1:
            if action == 0: return 1, -1
            elif action == 1: return 0, -1
            elif action == 2: return 1, 0
        elif head_type == 2:
            if action == 0: return 1, 0
            elif action == 1: return 1, -1
            elif action == 2: return 1, 1
        elif head_type == 3:
            if action == 0: return -1, 1
            elif action == 1: return 1, 0
            elif action == 2: return 0, 1
        elif head_type == 4:
            if action == 0: return 0, 1
            elif action == 1: return 1, 1
            elif action == 2: return -1, 1
        elif head_type == 5:
            if action == 0: return -1, 1
            elif action == 1: return 0, 1
            elif action == 2: return -1, 0
        elif head_type == 6:
            if action == 0: return -1, 0
            elif action == 1: return -1, 1
            elif action == 2: return -1, -1
        elif head_type == 7:
            if action == 0: return -1, -1
            elif action == 1: return -1, 0
            elif action == 2: return 0, -1
        else:
            return 0, 0

    # step
    def step(self, action):
        """Advance one tick.

        Action 0 keeps the agent in place; actions 1..3 move it relative
        to its current heading (see move()).  Moves that would leave the
        canvas are undone.

        Returns:
            tuple: (dx, dy, reached_destination, dir_error) where
            dir_error flags an immediate left/right action reversal.
        """
        x, y = 0, 0
        if action != 0:
            # Defensive default; overwritten below (the first history
            # entry always has l == [] != c, so the loop breaks).
            ht = self.init_head
            if len(self.last_pos) == 0:
                ht = self.init_head
            else:
                # Walk the history backwards to the most recent actual
                # displacement and derive the heading from it.
                for i in range(0, len(self.all_pos), 1):
                    d_pos = self.all_pos[-(i + 1)]
                    last, cur = d_pos['l'], d_pos['c']
                    if last != cur:
                        if len(last) == 0:
                            ht = self.init_head
                        else:
                            dx = cur[0] - last[0]
                            dy = cur[1] - last[1]
                            ht = self.head_type(dx, dy)
                        break
            x, y = self.move(ht, action)
        # Snapshot the old position, then move cur_pos in place (the
        # history entries alias cur_pos; see __init__).
        self.last_pos = [i for i in self.cur_pos]
        self.cur_pos[0] += x
        self.cur_pos[1] += y
        if self.cur_pos[0] < 0 or self.cur_pos[0] > self.width or \
                self.cur_pos[1] < 0 or self.cur_pos[1] > self.height:
            # Out of bounds: undo the move and report no displacement.
            self.cur_pos = [i for i in self.last_pos]
            x = 0
            y = 0
        self.all_pos.append({"l": self.last_pos, "c": self.cur_pos})
        # Flag an immediate 2<->3 (left/right) reversal.
        dir_error = False
        if (self.last_action == 2 and action == 3) or (self.last_action == 3 and action == 2):
            dir_error = True
        self.last_action = action
        return x, y, self.isDst(self.cur_pos), dir_error
"[email protected]"
] | |
c7c76a95e132383229ab1880ccf18627e229417e | 2a979b2dfc1e9bce8d93fb8c2d219c0122e50b66 | /SWEA_D1D2/1976. 시각 덧셈.py | 48132df3b4b80dd126a4ea219a8f86f9105af43b | [] | no_license | JohnHamm0nd/algorithm | 8b1bf1b58790da0b8c6482b6ce068fd29316a374 | 13c00cbc270cc197327e97f61f7d2ed80dc81358 | refs/heads/master | 2020-07-07T07:43:04.523954 | 2019-11-07T02:54:55 | 2019-11-07T02:54:55 | 203,294,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | """
시 분으로 이루어진 시각을 2개 입력 받아, 더한 값을 시 분으로 출력하는 프로그램을 작성하라.
(시각은 12시간제로 표시한다. 즉, 시가 가질 수 있는 값은 1시부터 12시이다.)
[제약 사항]
시는 1 이상 12 이하의 정수이다. 분은 0 이상 59 이하의 정수이다.
[입력]
가장 첫 줄에는 테스트 케이스의 개수 T가 주어지고, 그 아래로 각 테스트 케이스가 주어진다.
각 테스트 케이스의 첫 번째 줄에는 4개의 수가 주어진다.
첫 번째 수가 시를 나타내고 두 번째 수가 분을 나타낸다. 그 다음 같은 형식으로 두 번째 시각이 주어진다.
[출력]
출력의 각 줄은 '#t'로 시작하고 공백을 한 칸 둔 다음, 시를 출력하고 공백을 한 칸 둔 다음 분을 출력한다.
(t는 테스트 케이스의 번호를 의미하며 1부터 시작한다.)
"""
import sys
sys.stdin = open('1976.txt', 'r')


def _add_times(h1, m1, h2, m2):
    """Add two 12-hour-clock times and return (hour, minute).

    Hours are 1..12 and minutes 0..59; the sum is normalised back into
    that range (12 o'clock stays 12, it never becomes 0).
    """
    hour = h1 + h2
    minute = m1 + m2
    if minute >= 60:
        hour += 1
        minute -= 60
    # BUGFIX: the original `if hour >= 12: hour -= 12` turned a valid
    # 12 o'clock into 0 and left sums above 24 (e.g. 12:59 + 12:59 -> 25)
    # out of range; reduce until the hour is back in 1..12.
    while hour > 12:
        hour -= 12
    return hour, minute


T = int(input())
for t in range(T):
    h1, m1, h2, m2 = map(int, input().split())
    hour, minute = _add_times(h1, m1, h2, m2)
    print(f'#{t+1} {hour} {minute}')
| [
"[email protected]"
] | |
6b6373b045d60a93ea0b36da1938223cd68426cf | 279413691f027db9a0b4d545899db9f76ebb8464 | /r1.py | 181eec3c4ab70d8ee95229ed5b9348443ca88aa6 | [] | no_license | T-Javis/RNN-Binary-system-plus | 0a742e0cb3c6d5b4fed07499c9d52a772dd081b4 | e841852fc913658354c8c2c63427f397a4e8ff66 | refs/heads/master | 2021-01-20T12:28:40.503160 | 2017-02-21T08:48:51 | 2017-02-21T08:48:51 | 82,657,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,505 | py | import copy, numpy as np
np.random.seed(0)  # fixed seed: every run generates the same problems and weights
# compute sigmoid nonlinearity
def sigmoid(x):
    """Logistic sigmoid nonlinearity: squashes x into (0, 1)."""
    return 1 / (1 + np.exp(-x))
# convert output of sigmoid function to its derivative
def sigmoid_output_to_derivative(output):
    """Sigmoid derivative expressed in terms of the sigmoid's *output*."""
    return (1 - output) * output
# training dataset generation
# training dataset generation: lookup table from integer -> 8-bit binary row
int2binary = {}
binary_dim = 8
largest_number = pow(2,binary_dim)
# unpack 0..255 into their bit representations, one row per integer
binary = np.unpackbits(
    np.array([range(largest_number)],dtype=np.uint8).T,axis=1)
for i in range(largest_number):
    int2binary[i] = binary[i]
# input variables
alpha = 0.1        # learning rate
input_dim = 2      # one bit from each operand per timestep
hidden_dim = 16    # recurrent hidden state size
output_dim = 1     # one sum bit per timestep
# initialize neural network weights, uniform in [-1, 1)
synapse_0 = 2*np.random.random((input_dim,hidden_dim)) - 1    # input -> hidden
synapse_1 = 2*np.random.random((hidden_dim,output_dim)) - 1   # hidden -> output
synapse_h = 2*np.random.random((hidden_dim,hidden_dim)) - 1   # hidden -> next hidden
# accumulators for the weight updates gathered during backprop
synapse_0_update = np.zeros_like(synapse_0)
synapse_1_update = np.zeros_like(synapse_1)
synapse_h_update = np.zeros_like(synapse_h)
# training logic
for j in range(10000):

    # generate a simple addition problem (a + b = c); keep both operands
    # below half the maximum so the sum still fits in binary_dim bits.
    # BUGFIX: // keeps the randint bound an integer on Python 3 as well
    # (largest_number/2 is a float there, which randint rejects).
    a_int = np.random.randint(largest_number // 2)  # int version
    a = int2binary[a_int]  # binary encoding

    b_int = np.random.randint(largest_number // 2)  # int version
    b = int2binary[b_int]  # binary encoding

    # true answer
    c_int = a_int + b_int
    c = int2binary[c_int]

    # where we'll store our best guess (binary encoded)
    d = np.zeros_like(c)

    overallError = 0

    layer_2_deltas = list()
    layer_1_values = list()
    layer_1_values.append(np.zeros(hidden_dim))  # zero hidden state at t=0

    # forward pass, moving along the binary positions least-significant first
    for position in range(binary_dim):

        # generate input and output for this timestep
        X = np.array([[a[binary_dim - position - 1], b[binary_dim - position - 1]]])
        y = np.array([[c[binary_dim - position - 1]]]).T

        # hidden layer (current input + previous hidden state)
        layer_1 = sigmoid(np.dot(X, synapse_0) + np.dot(layer_1_values[-1], synapse_h))

        # output layer (predicted sum bit)
        layer_2 = sigmoid(np.dot(layer_1, synapse_1))

        # did we miss?... if so by how much?
        layer_2_error = y - layer_2
        layer_2_deltas.append((layer_2_error) * sigmoid_output_to_derivative(layer_2))
        overallError += np.abs(layer_2_error[0])

        # decode estimate so we can print it out
        d[binary_dim - position - 1] = np.round(layer_2[0][0])

        # store hidden layer so we can use it in the next timestep
        layer_1_values.append(copy.deepcopy(layer_1))

    future_layer_1_delta = np.zeros(hidden_dim)

    # backward pass (backprop through time), last timestep to first
    for position in range(binary_dim):

        X = np.array([[a[position], b[position]]])
        layer_1 = layer_1_values[-position - 1]
        prev_layer_1 = layer_1_values[-position - 2]

        # error at output layer
        layer_2_delta = layer_2_deltas[-position - 1]
        # error at hidden layer: from the future hidden state and the output
        layer_1_delta = (future_layer_1_delta.dot(synapse_h.T) +
                         layer_2_delta.dot(synapse_1.T)) * sigmoid_output_to_derivative(layer_1)

        # accumulate weight updates; they are applied only after the full
        # backward pass because the weights are still needed for backprop
        synapse_1_update += np.atleast_2d(layer_1).T.dot(layer_2_delta)
        synapse_h_update += np.atleast_2d(prev_layer_1).T.dot(layer_1_delta)
        synapse_0_update += X.T.dot(layer_1_delta)

        future_layer_1_delta = layer_1_delta

    # apply the accumulated updates, then reset the accumulators
    synapse_0 += synapse_0_update * alpha
    synapse_1 += synapse_1_update * alpha
    synapse_h += synapse_h_update * alpha

    synapse_0_update *= 0
    synapse_1_update *= 0
    synapse_h_update *= 0

    # print out progress.  BUGFIX: parenthesised print calls behave the
    # same on Python 2 and are required on Python 3 (the originals were
    # Python-2-only print statements).
    if (j % 1000 == 0):
        print("Error:" + str(overallError))
        print("Pred:" + str(d))
        print("True:" + str(c))
        out = 0
        for index, x in enumerate(reversed(d)):
            out += x * pow(2, index)
        print(str(a_int) + " + " + str(b_int) + " = " + str(out))
        print("------------")
'''
Lines 0-2:导入依赖包,设定随机数生成的种子。我们只需要两个依赖包,numpy和copy。numpy是为了矩阵计算,copy用来拷贝东西。
Lines 4-11:我们的非线性函数与其导数,更多的细节可见参考我们之前的博客:http://blog.csdn.net/zzukun/article/details/49556715
Line 15:这一行声明了一个查找表,这个表是一个实数与对应二进制表示的映射。二进制表示将会是我们网路的输入与输出,所以这个查找表将会帮助我们将实数转化为其二进制表示。
Line 16:这里设置了二进制数的最大长度。如果一切都调试好了,你可以把它调整为一个非常大的数。
Line 18:这里计算了跟二进制最大长度对应的可以表示的最大十进制数。
Line 19:这里生成了十进制数转二进制数的查找表,并将其复制到int2binary里面。虽然说这一步不是必需的,但是这样的话理解起来会更方便。
Line 26:这里设置了学习速率。
Line 27:我们要把两个数加起来,所以我们一次要输入两位字符。如此以来,我们的网络就需要两个输入。
Line 28:这是隐含层的大小,回来存储“携带位”。需要注意的是,它的大小比原理上所需的要大。自己尝试着调整一下这个值,然后看看它如何影响收敛速率。更高的隐含层维度会使训练变慢还是变快?更多或是更少的迭代次数?
Line 29:我们只是预测和的值,也就是一个数。如此,我们只需一个输出。
Line 33:这个权值矩阵连接了输入层与隐含层,如此它就有“imput_dim”行以及“hidden_dim”列(假如你不改参数的话就是2×16)。
Line 34:这个权值矩阵连接了隐含层与输出层,如此它就有“hidden_dim”行以及“output_dim”列(假如你不改参数的话就是16×1)。
Line 35:这个权值矩阵连接了前一时刻的隐含层与现在时刻的隐含层。它同样连接了当前时刻的隐含层与下一时刻的隐含层。如此以来,它就有隐含层维度大小(hidden_dim)的行与隐含层维度大小(hidden_dim)的列(假如你没有修改参数就是16×16)。
Line 37-39:这里存储权值更新。在我们积累了一些权值更新以后,我们再去更新权值。这里先放一放,稍后我们再详细讨论。
Line 42:我们迭代训练样例10000次。
Line 45:这里我们要随机生成一个在范围内的加法问题。所以我们生成一个在0到最大值一半之间的整数。如果我们允许网络的表示超过这个范围,那么把两个数加起来就有可能溢出(比如一个很大的数导致我们的位数不能表示)。所以说,我们只把加法要加的两个数字设定在小于最大值的一半。
Line 46:我们查找a_int对应的二进制表示,然后把它存进a里面。
Line 48:原理同45行。
Line 49:原理同46行。
Line 52:我们计算加法的正确结果。
Line 53:把正确结果转化为二进制表示。
Line 56:初始化一个空的二进制数组,用来存储神经网络的预测值(便于我们最后输出)。你也可以不这样做,但是我觉得这样使事情变得更符合直觉。
Line 58:重置误差值(这是我们使用的一种记录收敛的方式……可以参考之前关于反向传播与梯度下降的文章)
Line 60-61:这两个list会每个时刻不断的记录layer 2的导数值与layer 1的值。
Line 62:在0时刻是没有之前的隐含层的,所以我们初始化一个全为0的。
Line 65:这个循环是遍历二进制数字。
Line 68:X跟图片中的“layer_0”是一样的,X数组中的每个元素包含两个二进制数,其中一个来自a,一个来自b。它通过position变量从a,b中检索,从最右边往左检索。所以说,当position等于0时,就检索a最右边的一位和b最右边的一位。当position等于1时,就向左移一位。
Line 69:跟68行检索的方式一样,但是把值替代成了正确的结果(0或者1)。
Line 72:这里就是奥妙所在!一定一定一定要保证你理解这一行!!!为了建立隐含层,我们首先做了两件事。第一,我们从输入层传播到隐含层(np.dot(X,synapse_0))。然后,我们从之前的隐含层传播到现在的隐含层(np.dot(prev_layer_1.synapse_h))。在这里,layer_1_values[-1]就是取了最后一个存进去的隐含层,也就是之前的那个隐含层!然后我们把两个向量加起来!!!!然后再通过sigmoid函数。
那么,我们怎么结合之前的隐含层信息与现在的输入呢?当每个都被变量矩阵传播过以后,我们把信息加起来。
Line 75:这行看起来很眼熟吧?这跟之前的文章类似,它从隐含层传播到输出层,即输出一个预测值。
Line 78:计算一下预测误差(预测值与真实值的差)。
Line 79:这里我们把导数值存起来(上图中的芥末黄),即把每个时刻的导数值都保留着。
Line 80:计算误差的绝对值,并把它们加起来,这样我们就得到一个误差的标量(用来衡量传播)。我们最后会得到所有二进制位的误差的总和。
Line 86:将layer_1的值拷贝到另外一个数组里,这样我们就可以下一个时间使用这个值。
Line 90:我们已经完成了所有的正向传播,并且已经计算了输出层的导数,并将其存入在一个列表里了。现在我们需要做的就是反向传播,从最后一个时间点开始,反向一直到第一个。
Line 92:像之前那样,检索输入数据。
Line 93:从列表中取出当前的隐含层。
Line 94:从列表中取出前一个隐含层。
Line 97:从列表中取出当前输出层的误差。
Line 99:这一行计算了当前隐含层的误差。通过当前之后一个时间点的误差和当前输出层的误差计算。
Line 102-104:我们已经有了反向传播中当前时刻的导数值,那么就可以生成权值更新的量了(但是还没真正的更新权值)。我们会在完成所有的反向传播以后再去真正的更新我们的权值矩阵,这是为什么呢?因为我们要用权值矩阵去做反向传播。如此以来,在完成所有反向传播以前,我们不能改变权值矩阵中的值。
Line 109-115:现在我们就已经完成了反向传播,得到了权值要更新的量,所以就赶快更新权值吧(别忘了重置update变量)!
Line 118-end:这里仅仅是一些输出日志,便于我们观察中间的计算过程与效果。
''' | [
"[email protected]"
] | |
2847ad76c81be71b6db9629a664e76824972e3e8 | 256fec0d4f8d96a37f4dfad88acd7585b5607bdb | /transit_odp/site_admin/forms.py | 90db23f1631595e21204fb1adc2ecd248e0bea00 | [] | no_license | burlacenko/bods | b13a523d2e30fab6286d7c7515abfa800afc8209 | 5a4d490fc7142715a357255eb6b48cce4ab64c10 | refs/heads/main | 2023-07-26T12:59:52.633762 | 2021-08-24T11:14:05 | 2021-08-24T11:14:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,559 | py | from crispy_forms.layout import ButtonHolder, Field, Layout
from crispy_forms_govuk.forms import GOVUKForm, GOVUKModelForm
from crispy_forms_govuk.layout import ButtonSubmit, LinkButton
from crispy_forms_govuk.layout.fields import CheckboxSingleField
from django import forms
from django.contrib import auth
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from invitations.forms import CleanEmailMixin
from invitations.utils import get_invitation_model
from transit_odp.common.contants import DEFAULT_ERROR_SUMMARY
from transit_odp.common.layout import InlineFormset
from transit_odp.organisation.constants import FeedStatus
from transit_odp.organisation.forms.organisation_profile import (
NOCFormset,
OrganisationProfileForm,
)
from transit_odp.organisation.models import Organisation
from transit_odp.users.constants import AccountType
from transit_odp.users.forms.admin import (
EMAIL_HELP_TEXT,
EMAIL_INVALID,
EMAIL_LABEL,
EMAIL_MISSING,
)
User = auth.get_user_model()
Invitation = get_invitation_model()
class OrganisationNameForm(GOVUKModelForm, CleanEmailMixin):
    """Admin form that creates an organisation and invites its key contact.

    Captures the organisation name/short name, the key contact's email
    (validated by CleanEmailMixin against existing users and pending
    invites) and an inline formset of NOCs.
    """
    form_error_title = DEFAULT_ERROR_SUMMARY
    errors_template = "organisation/organisation_profile_form/errors.html"
    class Meta:
        model = Organisation
        fields = ("name", "short_name")
    # Email address the key-contact invitation will be sent to.
    email = forms.EmailField(
        label=_("Key contact email"),
        required=True,
        widget=forms.TextInput(
            attrs={
                "type": "email",
                "size": "30",
                "class": "govuk-!-width-three-quarters",
            }
        ),
    )
    def __init__(self, *args, **kwargs):
        """Pop `cancel_url` (target of the Cancel button), then configure
        field labels/error messages and build the nested NOC formset."""
        self.cancel_url = kwargs.pop("cancel_url")
        super().__init__(*args, **kwargs)
        name = self.fields["name"]
        name.label = _("Organisation name")
        name.widget.attrs.update({"class": "govuk-!-width-three-quarters"})
        name.error_messages.update({"required": _("Enter the organisation name")})
        short_name = self.fields["short_name"]
        short_name.label = _("Organisation short name")
        short_name.widget.attrs.update({"class": "govuk-!-width-three-quarters"})
        short_name.error_messages.update(
            {"required": _("Enter the organisation short name")}
        )
        # Inline NOC formset, bound to the same data/files when the form is bound.
        self.nested = NOCFormset(
            instance=self.instance,
            data=self.data if self.is_bound else None,
            files=self.files if self.is_bound else None,
        )
    def get_layout(self):
        """Crispy-forms layout: fields, nested NOC formset, submit/cancel."""
        return Layout(
            "name",
            "short_name",
            "email",
            InlineFormset("nested"),
            ButtonHolder(
                ButtonSubmit(name="submit", content=_("Send Invitation")),
                LinkButton(url=self.cancel_url, content="Cancel"),
            ),
        )
class OrganisationContactEmailForm(CleanEmailMixin, GOVUKForm):
    """
    Contact form deriving from CleanEmailMixin since it is used to clean
    the email used to send the invite. The mixin provides extra validation
    around the email, such as checking it is not already in use or already
    invited.
    """

    # Rendered without its own <form> tag (embedded in a parent form).
    form_tag = False
    form_error_title = _("There is a problem")
    email = forms.EmailField(
        label=_("E-mail"),
        required=True,
        widget=forms.TextInput(attrs={"type": "email", "size": "30"}),
    )
    def __init__(self, *args, **kwargs):
        """Apply the shared label/help-text/error-message constants to the
        email field."""
        super().__init__(*args, **kwargs)
        email = self.fields["email"]
        email.label = EMAIL_LABEL
        email.help_text = EMAIL_HELP_TEXT
        email.widget.attrs.update(
            {"placeholder": "", "class": "govuk-!-width-three-quarters"}
        )
        email.error_messages.update(
            {"required": EMAIL_MISSING, "invalid": EMAIL_INVALID}
        )
    def get_layout(self):
        # Single-field layout: just the email input.
        return Layout("email")
class OrganisationForm(OrganisationProfileForm):
    """Admin edit form for an organisation: name, short name, key contact
    (restricted to the organisation's admins) and the licence flag."""
    class Meta:
        model = Organisation
        fields = ["name", "short_name", "key_contact", "licence_required"]
        labels = {
            "name": _("Organisation name"),
            "short_name": _("Organisation short name"),
        }
    def __init__(
        self, data=None, files=None, instance=None, cancel_url=None, *args, **kwargs
    ):
        """Build the form and replace `key_contact` with a choice field
        limited to org-admin users of *instance*."""
        super().__init__(
            data=data,
            files=files,
            instance=instance,
            cancel_url=cancel_url,
            *args,
            **kwargs,
        )
        name = self.fields["name"]
        name.widget.attrs.update({"class": "govuk-!-width-two-thirds"})
        # Only admins of this organisation are eligible key contacts.
        self.fields.update(
            {
                "key_contact": forms.ModelChoiceField(
                    queryset=User.objects.filter(organisations=instance).filter(
                        account_type=AccountType.org_admin.value
                    ),
                    initial=instance.key_contact,
                    label=_("Key contact email"),
                )
            }
        )
        key_contact = self.fields["key_contact"]
        # Show each candidate by email address rather than str(user).
        self.fields["key_contact"].label_from_instance = lambda obj: obj.email
        key_contact.widget.attrs.update({"class": "govuk-!-width-full govuk-select"})
    def get_layout(self):
        """Crispy layout; the licence checkbox is disabled once the
        organisation already has licences recorded."""
        if self.instance.licences.exists():
            checkbox = CheckboxSingleField(
                "licence_required",
                small_boxes=True,
                disabled=True,
            )
        else:
            checkbox = CheckboxSingleField(
                "licence_required",
                small_boxes=True,
            )
        return Layout(
            Field("name", wrapper_class="govuk-form-group govuk-!-margin-bottom-4"),
            Field(
                "short_name", wrapper_class="govuk-form-group govuk-!-margin-bottom-4"
            ),
            Field(
                "key_contact",
                wrapper_class="govuk-form-group govuk-!-margin-bottom-4",
            ),
            InlineFormset("nested_noc"),
            InlineFormset("nested_psv"),
            checkbox,
            ButtonHolder(
                ButtonSubmit(name="submit", content=_("Save")),
                LinkButton(url=self.cancel_url, content="Cancel"),
            ),
        )
class OrganisationFilterForm(GOVUKForm):
    """GET filter form for the organisation list page."""
    form_method = "get"
    form_tag = False
    status = forms.ChoiceField(
        choices=(("", "All statuses"), (0, "Active"), (1, "Pending"), (2, "Inactive")),
        required=False,
    )
    # NOTE(review): `name` reuses the status choices verbatim — this looks
    # like a copy-paste of the field above; confirm the intended choices.
    name = forms.ChoiceField(
        choices=(("", "All statuses"), (0, "Active"), (1, "Pending"), (2, "Inactive")),
        required=False,
    )
    def get_layout(self):
        return Layout(
            Field("status", css_class="govuk-!-width-full"),
            Field("name", css_class="govuk-!-width-full"),
            ButtonSubmit("submitform", "submit", content=_("Apply filter")),
        )
class BulkResendInvitesForm(forms.Form):
    """Form backing the "resend invites" bulk action on the organisation list."""
    # NOTE(review): form_method is a GOVUKForm convention; on a plain
    # forms.Form it appears unused — confirm.
    form_method = "get"
    bulk_invite = forms.BooleanField(required=False, initial=False)
    invites = forms.IntegerField(required=False)
    def __init__(self, *args, orgs=None, **kwargs):
        # Queryset of candidate organisations, used for cross-validation below.
        self.orgs_qs = orgs
        super().__init__(*args, **kwargs)
    def clean(self):
        # Require at least one selected organisation when the bulk action fired.
        if self.data.get("bulk_invite", False) and not self.data.getlist("invites"):
            raise ValidationError(
                _("Please select organisation(s) from below to resend invitation")
            )
    def clean_invites(self):
        # "invites" arrives as repeated query parameters; collect them all
        # rather than relying on the single-valued IntegerField cleaning.
        org_ids = [int(org_id) for org_id in self.data.getlist("invites", [])]
        return org_ids
    def _post_clean(self):
        # Reject the submission if any selected organisation is already active
        # (only pending organisations may be re-invited).
        if (
            self.orgs_qs.filter(id__in=self.cleaned_data["invites"])
            .exclude(status="pending")
            .exists()
        ):
            self.add_error(
                None,
                ValidationError(
                    _(
                        "You cannot send invites to already active organisations, "
                        "please select pending ones"
                    )
                ),
            )
class BaseDatasetSearchFilterForm(GOVUKForm):
    """Shared GET filter form for dataset search pages (status dropdown only)."""
    form_method = "get"
    form_tag = False
    status = forms.ChoiceField(
        choices=(
            ("", "All statuses"),
            (FeedStatus.live.value, "Published"),
            (FeedStatus.success.value, "Draft"),
            (FeedStatus.expired.value, "Expired"),
            (FeedStatus.inactive.value, "Inactive"),
        ),
        required=False,
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The select has no visible label, so expose one to screen readers.
        self.fields["status"].widget.attrs.update({"aria-label": "Filter by"})
    def get_layout(self):
        return Layout(
            Field("status", css_class="govuk-!-width-full"),
            ButtonSubmit("submitform", "submit", content=_("Apply filter")),
        )
class TimetableSearchFilterForm(BaseDatasetSearchFilterForm):
    # Uses the base status vocabulary unchanged.
    pass
class AVLSearchFilterForm(BaseDatasetSearchFilterForm):
    # AVL feeds expose a different status vocabulary than timetables.
    status = forms.ChoiceField(
        choices=(
            ("", "All statuses"),
            (FeedStatus.live.value, "Published"),
            (FeedStatus.error.value, "Error"),
            (FeedStatus.inactive.value, "Deactivated"),
        ),
        required=False,
    )
class EditNotesForm(GOVUKModelForm):
    """Small model form for editing the free-text notes on a user."""
    class Meta:
        model = User
        fields = ["notes"]
        labels = {"notes": "Notes"}
        widgets = {
            "notes": forms.Textarea(attrs={"rows": 5, "cols": 20}),
        }
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
| [
"[email protected]"
] | |
63e933800e2d19f9323b0119310390270a238f10 | 9988b0ab12b7ff3a61cc8e4fd4ebd3cc7239f224 | /www/static/app.py | af99520f2b0ebe8eff96391fb79e619ec072f790 | [] | no_license | onceonmydoor/python3-webapp | c19f04801b9301d50fd06a9ebb1acf27df27da8d | 2a30c37bfc3dde1374b74d611be184f4c4dbd61f | refs/heads/master | 2020-04-13T19:27:03.518929 | 2018-12-28T12:00:57 | 2018-12-28T12:00:57 | 163,402,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | import logging;logging.basicConfig(level=logging.INFO)
import logging; logging.basicConfig(level=logging.INFO)
from aiohttp import web
def index(request):
return web.Response(body='<h1>Awesome</h1>'.encode('utf-8'),content_type='text/html')
def init():
app = web.Application()
app.router.add_route('GET', '/', index)
web.run_app(app, host='127.0.0.1', port=5888)
logging.info('server started at http://127.0.0.1:5888...')
init()
| [
"[email protected]"
] | |
6a40790f672e4d1d3e784fb7a0f362677fef2494 | e506ad5c7e8da9159771dbca1c60a19bb0433556 | /base/thirdparty/ACE/timex_mention.py | 16f99aa55949a7c37499546dddd60792831b18d2 | [] | no_license | FantasyoO666/delta-learning-for-ed | 9aeb2b2d481dbbc747abba43a7bda161978b8813 | a4b03cdc9d36af706250c4f6c8d1083a1a5d83a5 | refs/heads/master | 2022-01-21T16:22:50.149451 | 2019-08-20T10:25:12 | 2019-08-20T10:25:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Created by Roger on 2018/2/19
from .mention import Mention
from .charseq import CharSeq
class TimexMention(Mention):
    """A mention of a TIMEX2 temporal expression inside a document."""

    def __init__(self, id, timex, extent):
        """
        :param id: str
        :param timex: Timex
        :param extent: charseq
        """
        # A timex mention has no separate head span; reuse the extent.
        Mention.__init__(self, id, extent, extent)
        self.timex = timex

    @staticmethod
    def from_xml_tags(tags, docid=None, timex=None):
        """Build a TimexMention from a parsed <timex2_mention> element."""
        tm_id = tags.get('ID')
        extent = CharSeq.from_xml_tags(tags.find_all('extent')[0].find_all('charseq')[0], docid=docid)
        return TimexMention(id=tm_id, timex=timex, extent=extent)

    def to_xml_tags(self):
        """Serialize back to <timex2_mention> XML.

        Bug fix: the original emitted ``ID="%s""`` with a stray doubled
        quote, producing malformed XML.
        """
        vm_head = '    <timex2_mention ID="%s">' % self.id
        extent = '      <extent>\n    %s\n      </extent>' % self.extent.to_xml_tags()
        vm_end = '    </timex2_mention>'
        return '\n'.join([vm_head, extent, vm_end])

    def get_label(self):
        # Delegate to the owning Timex object.
        return self.timex.get_label()
| [
"[email protected]"
] | |
0dcedff9199e5313201e0475f487cf8386ed7ae1 | 5e5caf3ec6e50f1d5b5c445c462c9ce328e01e70 | /WebDeveloping/models/database.py | 0f684e48f6ae338bc45bcb6e2cfdae342394a586 | [] | no_license | janaobsteter/PyCharmProjects | f6a254e6fbc3f068e1c798d3489f52aac9c1bea3 | 2d9e14ad821a7dcec352eb3bb2d053fb49d8fb42 | refs/heads/master | 2021-07-16T23:56:41.287470 | 2020-05-12T11:59:58 | 2020-05-12T11:59:58 | 156,727,320 | 0 | 0 | null | 2020-05-12T12:05:26 | 2018-11-08T15:25:59 | Python | UTF-8 | Python | false | false | 701 | py | import pymongo
class Database(object):
    """Thin static wrapper around a single MongoDB database connection."""

    URI = "mongodb://127.0.0.1:27017"  # universal resource identifier
    DATABASE = None  # set by initialise()

    @staticmethod
    def initialise():
        """Connect to MongoDB and select the application database."""
        client = pymongo.MongoClient(Database.URI)
        Database.DATABASE = client["webdeveloping"]

    @staticmethod
    def insert(collection, data):
        """Insert a document into the given collection."""
        Database.DATABASE[collection].insert(data)

    @staticmethod
    def find(collection, query):
        """Return a cursor over all documents matching ``query``.

        Bug fix: the original executed the query but discarded the cursor
        (no ``return``), despite its comment promising one.
        """
        return Database.DATABASE[collection].find(query)

    @staticmethod
    def find_one(collection, query):
        """Return the first document matching ``query`` (or None).

        Bug fix: the original discarded the result instead of returning it.
        """
        return Database.DATABASE[collection].find_one(query)
"[email protected]"
] | |
8c0d9de7d05a97c3bee7a98b39df86ac8b75898a | ac4145fb85bde7313d8641d3093940a87af8062f | /flask/timeline.py | e637cd97da0a037974375b9e74af9e510897668c | [] | no_license | arrpak/KSCHOOL-TFM-1 | 22d89a749692ba3715ae8347d72cfcb8b0306b60 | 886fa6c066782c5588435514bd4b9af48689c8ad | refs/heads/master | 2021-01-22T19:53:54.763137 | 2016-11-30T10:51:46 | 2016-11-30T10:51:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | def tweets_timeline(query='Jets', hourP=10000):
from pymongo import MongoClient
from datetime import datetime, timedelta
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017
DBS_NAME = 'KSCHOOL_TFM'
COLLECTION_NAME = 'tweets'
connection = MongoClient(MONGODB_HOST, MONGODB_PORT) #getting client
collection = connection[DBS_NAME][COLLECTION_NAME] #getting db.tweets
print 'tweets_timeline'
pipeline_2=[{"$match" : {"dateTime" : {"$gte": datetime.utcnow() - timedelta(hourP) }, "tag": query}},
{"$group" : {"_id" : {"year" : {"$year": "$dateTime"},
"month" : {"$month": "$dateTime"},
"day" : {"$dayOfMonth": "$dateTime"},
"hour": {"$hour": "$dateTime"}
},
"count":{"$sum":1}}},
{"$sort" : {"date" : 1 }},
{"$limit": 100}]
u=[doc for doc in collection.aggregate(pipeline_2)]
dts = map(lambda x: x['_id'], u)
cs = map(lambda x: x['count'], u)
f = []
for dt, c in zip(dts,cs):
l=map(lambda x: str(x), [dt['year'], dt['month'],dt['day'],dt['hour']])
f+=[{'date' : "-".join(l), 'value':c}]
return f
| [
"[email protected]"
] | |
dd94c2ee421d7aa76339e07bb662af5dcf6d79de | 694ab768fcd52ee39941fd3097d9a6d5f7698ba3 | /lesson7/task1.py | 7410b68fb95375fe9bc2da13b856e6424a1f189c | [] | no_license | anatolytsi/python_basics_homeworks | 1c811fb8ac6f24813a981af1f1bb9276733fc141 | f317f89834411bcb65bc48429dda207cc65c8c06 | refs/heads/master | 2023-03-02T02:44:19.171660 | 2021-02-04T09:21:34 | 2021-02-04T09:21:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | """
Реализовать класс Matrix (матрица). Обеспечить перегрузку конструктора класса (метод __init__()), который должен
принимать данные (список списков) для формирования матрицы.
Следующий шаг — реализовать перегрузку метода __str__() для вывода матрицы в привычном виде.
Далее реализовать перегрузку метода __add__() для реализации операции сложения двух объектов класса Matrix (двух
матриц). Результатом сложения должна быть новая матрица.
"""
class Matrix:
    """A simple 2-D matrix backed by a list of row lists."""

    def __init__(self, data: list):
        self.data = data

    def __str__(self):
        """Render the matrix as tab-separated rows, one per line."""
        rendered = '\n'
        for row in self.data:
            rendered += '\t'.join(str(itm) for itm in row) + '\n'
        return rendered

    def __add__(self, other):
        """Return a new Matrix that is the element-wise sum of self and other.

        Raises:
            TypeError: if the two matrices have different dimensions.

        Bug fixes vs. the original: dimensions are compared with ``==``
        (``is`` only worked by accident for small interned ints), and the
        mismatch case now actually *raises* TypeError instead of creating
        an unused exception object and returning None.
        """
        if len(self.data) != len(other.data) or len(self.data[0]) != len(other.data[0]):
            raise TypeError('Matrices should be of the same length!')
        summed = [
            [a + b for a, b in zip(row_a, row_b)]
            for row_a, row_b in zip(self.data, other.data)
        ]
        return Matrix(summed)
# Demo: two 3x3 matrices summed via the overloaded + operator.
matrix_a = [[1, 2, 3],
            [5, 8, 1],
            [12, 3, 2]]
matrix_b = [[5, 21, 4],
            [6, 2, 43],
            [11, 12, 8]]
mat_a = Matrix(matrix_a)
mat_b = Matrix(matrix_b)
print(f'Matrix A: {mat_a}')
print(f'Matrix B: {mat_b}')
mat_c = mat_a + mat_b
print(f'Matrix C: {mat_c}')
| [
"[email protected]"
] | |
97f8e319c9eb5577233928243a1173d9b418f7e2 | 7239f6985e975309c8b75b5796c5b8af0a301566 | /Exercice1/USER_MATERIALS/select_all_user_material.py | 1362afef6c13e5806a46786b99eef4bb658c0e2d | [] | no_license | omohammed95/MOHAMMED_OSMAN_DATABASE_1C_2020 | e8219a15cddd6a4f02989a5a3b4a2f9b0f3700d0 | a60ac414fb5fdc42fd58d78c91b4fa56e43847df | refs/heads/master | 2021-04-04T07:35:36.324951 | 2020-07-02T21:16:14 | 2020-07-02T21:16:14 | 248,437,989 | 0 | 1 | null | 2021-03-20T03:58:39 | 2020-03-19T07:28:53 | Python | UTF-8 | Python | false | false | 2,386 | py | # select_all_user_material.py
# OM 2020.03.26 le but est d'afficher tous les lignes de la table "t_genres_films" en MySql.
# Importer le fichier "select_table.py" dans lequel il y a quelques classes et méthodes en rapport avec l'affichage des données dans UNE SEULE table.
import json
from Exercice1.DATABASE.SELECT import select_table
try:
# OM 2020.03.26 Une instance "select_record" pour permettre l'utilisation des méthodes de la classe DbSelectOneTable
select_record = select_table.DbSelectOneTable()
# Pour l'affichage du contenu suite à une requête SELECT avec un tri sur la colonne id_genre
# Cette requête provient du fichier à disposition dans mon gitlab "REQUETES_NOM_PRENOM_SUJET_BD_104.sql"
mysql_select_string = ("SELECT id_user, Nom , id_material, material FROM t_user_material AS T1\n"
"INNER JOIN t_user AS T2 ON T2.id_user = T1.fk_user\n"
"INNER JOIN t_material AS T3 ON T3.id_material = T1.fk_material")
# Les résultats de la requête se trouvent dans la variable "records_select" de type <class 'list'>
records_select = select_record.select_rows(mysql_select_string)
# Affiche différentes formes de "sortie" des données. Il y en a beaucoup d'autres, suivant l'utilisation finale (client WEB par ex.)
print("Type de type records_select : ",type(records_select),"Tous les résultats ", records_select, "Type des résultats ")
for row in records_select:
print(row['Nom'],row['material'],)
for row in records_select:
output = "nom du user: {Nom} material: {material}"
print(output.format(**row))
# Le meilleur pour la fin : le module pymysql intègre la conversion en JSON avec "cursorclass=pymysql.cursors.DictCursor"
# Pour vous prouver ceci, il faut importer le module JSON et vous comparer le résultat des print ci-dessous
# Il faut absolument approcher le format JSON
# https://developer.mozilla.org/fr/docs/Learn/JavaScript/Objects/JSON
print("Tous les résultats déjà en JSON ", records_select)
print(json.dumps(records_select))
print(json.dumps(records_select, sort_keys=True, indent=4, separators=(',', ': '), default=str))
except Exception as erreur:
# OM 2020.03.01 Message en cas d'échec du bon déroulement des commandes ci-dessus.
print("error message: {0}".format(erreur))
| [
"[email protected]"
] | |
17560bd5f9f10b9e74273cc868dfde2efc1484a4 | 709c693f7c0ee695020d74f0f4434e0e495c4b37 | /2013/fairandsquare/fairandsquare.py | da0ddb7ee812c3ceef33441eb30e83d8aeae02eb | [] | no_license | mstepniowski/codejam | 75cdf5db20c28409cecc361a653798637de6349e | c85d24f2f6d1c333acea16ce3e54b8fb0e7020fe | refs/heads/master | 2021-01-01T05:34:28.045438 | 2014-05-04T02:50:22 | 2014-05-04T02:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | # http://mathforum.org/library/drmath/view/51510.html
NUMBERS = [1, 4, 9, 121, 484, 10201, 12321, 14641, 40804, 44944, 1002001, 1234321, 4008004, 100020001, 102030201, 104060401, 121242121, 123454321, 125686521, 400080004, 404090404, 10000200001, 10221412201, 12102420121, 12345654321, 40000800004, 1000002000001, 1002003002001, 1004006004001, 1020304030201, 1022325232201, 1024348434201, 1210024200121, 1212225222121, 1214428244121, 1232346432321, 1234567654321, 4000008000004, 4004009004004]
def fairandsquare(a, b):
return len([n for n in NUMBERS if n >= a and n <= b])
def read_numbers(line):
    """Parse a whitespace-separated line of integers, ignoring one trailing newline."""
    stripped = line[:-1] if line[-1] == '\n' else line
    return [int(token) for token in stripped.split()]
if __name__ == '__main__':
    # Python 2 entry point: read the case count, then one "a b" pair per
    # case from stdin, printing the Code Jam answer line for each.
    import sys
    case_count = int(sys.stdin.readline()[:-1])
    for i in range(1, case_count + 1):
        a, b = read_numbers(sys.stdin.readline())
        print "Case #{}: {}".format(i, fairandsquare(a, b))
| [
"[email protected]"
] | |
84601ab634b5f6671bed9cde9880ffdae7c2c43a | b353fb05fd16ae7bad53d2057d71f29cfc3fd411 | /js2py/prototypes/jserror.py | 4ea19c758703e3d14cfb839db246d26c1f111fc0 | [] | no_license | barseghyanartur/Js2Py | edf06d302b0477f5854ee653a8b8a932d77cee83 | 36729de70325ad1fea251c52328bdceba9712282 | refs/heads/master | 2021-01-17T15:45:23.107297 | 2015-06-10T09:35:35 | 2015-06-10T09:35:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py |
class ErrorPrototype:
    # NOTE(review): this class looks like Js2Py prototype-definition source:
    # the method takes no `self` and references a `this` name that is
    # presumably injected by Js2Py's code generator — confirm before
    # refactoring it like normal Python.
    def toString():
        if this.TYPE!='Object':
            raise this.MakeError('TypeError', 'Error.prototype.toString called on non-object')
        name = this.get('name')
        name = 'Error' if name.is_undefined() else name.to_string().value
        msg = this.get('message')
        msg = '' if msg.is_undefined() else msg.to_string().value
        # Mirrors ECMAScript Error.prototype.toString: "name : message",
        # with the separator dropped when either part is empty.
        return name + (name and msg and ' : ') + msg
"[email protected]"
] | |
a6dbd788d0d9bde9401a79da5825a86b381902b3 | 5507b66702cc5370259ee35118011cc69add0246 | /cart/cart.py | bd77343764cdcce7abdcd6875b224b65661eebc3 | [] | no_license | teros0/django-shop | 6ec2c63a2db26c91cf112e183bcd1bcc7dcd7349 | 23c1848906004eabf8fe332b47e3845653316fed | refs/heads/master | 2020-12-24T12:32:08.842899 | 2016-11-30T14:32:16 | 2016-11-30T14:32:16 | 72,989,244 | 0 | 3 | null | 2016-11-08T11:20:58 | 2016-11-06T12:30:51 | Python | UTF-8 | Python | false | false | 2,387 | py | from decimal import Decimal
from django.conf import settings
from shop.models import Product
class Cart(object):
    """Session-backed shopping cart.

    Stored in the Django session as a dict keyed by product id (as str),
    each entry holding 'quantity' and 'price' (price kept as a str so the
    session stays JSON-serializable).
    """

    def __init__(self, request):
        """Initialize the cart from the current session, creating it if absent."""
        self.session = request.session
        cart = self.session.get(settings.CART_SESSION_ID)
        if not cart:
            # save an empty cart in the session
            cart = self.session[settings.CART_SESSION_ID] = {}
        self.cart = cart

    def add(self, product, quantity=1, update_quantity=False):
        """Add a product to the cart or update its quantity.

        If update_quantity is True the quantity is overwritten; otherwise
        it is incremented.
        """
        product_id = str(product.id)
        if product_id not in self.cart:
            self.cart[product_id] = {'quantity': 0, 'price': str(product.price)}
        if update_quantity:
            self.cart[product_id]['quantity'] = quantity
        else:
            self.cart[product_id]['quantity'] += quantity
        self.save()

    def save(self):
        # Update the session cart and mark the session as "modified" so the
        # session backend persists the change.
        self.session[settings.CART_SESSION_ID] = self.cart
        self.session.modified = True

    def remove(self, product):
        """Remove a product from the cart, if present."""
        product_id = str(product.id)
        if product_id in self.cart:
            del self.cart[product_id]
            self.save()

    def __iter__(self):
        """Yield cart items enriched with Product objects and Decimal prices.

        Bug fix: iterate over a *copy* of the session cart. The original
        attached Product instances and Decimal prices to self.cart itself,
        which would break JSON serialization on the next session save.
        """
        product_ids = self.cart.keys()
        # Get the product objects and add them to a working copy of the cart.
        products = Product.objects.filter(id__in=product_ids)
        cart = {key: dict(value) for key, value in self.cart.items()}
        for product in products:
            cart[str(product.id)]['product'] = product
        for item in cart.values():
            item['price'] = Decimal(item['price'])
            item['total_price'] = item['price'] * item['quantity']
            yield item

    def __len__(self):
        """Count all items in the cart."""
        return sum(item['quantity'] for item in self.cart.values())

    def get_total_price(self):
        """Return the Decimal total of all items in the cart."""
        return sum(Decimal(item['price']) * item['quantity'] for item in self.cart.values())

    def clear(self):
        # Remove the whole cart from the session.
        del self.session[settings.CART_SESSION_ID]
        self.session.modified = True
| [
"[email protected]"
] | |
742272efd7bf353f3c6b1589fbd1db446d54ac64 | 1570a00322588f8b7ea76163e9d2ea8f97876065 | /examples/01/clone.py | c1405448f186057f7452d8b67ec7ae94e3679e43 | [
"Apache-2.0"
] | permissive | pyaillet/ebpf-discovery | ce7aa280da1d15caa205152aed41c9f59f246564 | f152690ac641bf9cde8fde9b5b16c45a641b9343 | refs/heads/master | 2020-06-05T17:53:04.125720 | 2019-07-08T15:16:15 | 2019-07-08T15:16:15 | 192,503,728 | 0 | 0 | Apache-2.0 | 2019-07-08T14:04:57 | 2019-06-18T08:58:50 | null | UTF-8 | Python | false | false | 299 | py | #!/usr/bin/env python
import os
import time
def child():
    # Runs in the forked child: report our pid, then exit immediately
    # without Python cleanup (os._exit skips atexit handlers and flushing).
    print('New child ', os.getpid())
    os._exit(0)
def parent():
    # Fork a new child every ~3 seconds, forever; the parent reaps each
    # child with waitpid so no zombies accumulate.
    while True:
        newpid = os.fork()
        if newpid == 0:
            child()
        else:
            time.sleep(3)
            os.waitpid(newpid, 0)
parent()
| [
"[email protected]"
] | |
43292353133db5aaa14790aa2ea784c1479a3ab2 | f967e571577b3d037a58610e152a76ceb8094314 | /ClusterUtils/DBScan.py | 97cb92d34eddda5eca8313e84b714dd558fced2e | [] | no_license | twillkens/ClusterUtils | 5a95f7e3e0872ec173c43617c62b27ff3cde766c | 92db3149b20b53482eb51efb68d1fa001835cbfb | refs/heads/master | 2020-03-28T22:08:56.587293 | 2018-10-01T19:36:46 | 2018-10-01T19:36:46 | 149,208,262 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,736 | py | import pandas as pd
import numpy as np
import time
from ClusterUtils.SuperCluster import SuperCluster
from ClusterUtils.ClusterPlotter import _plot_generic_
def dbscan(X, eps=1, min_points=10, verbose=False):
    """Cluster samples with the DBSCAN algorithm (implements the scaffold stub).

    Input: np.ndarray of shape (n_samples, n_features).
    Returns an np.ndarray of predicted cluster numbers, e.g.
    [0, 0, 0, 1, 1, 1, 2, 2, 2]; noise points are labelled -1.

    eps is the neighborhood radius; min_points is the minimum neighborhood
    size (including the point itself) for a point to be a core point.
    """
    X = np.asarray(X, dtype=float)
    n_samples = X.shape[0]
    labels = np.full(n_samples, -1, dtype=int)
    visited = np.zeros(n_samples, dtype=bool)

    def _region_query(idx):
        # Indices of all samples within eps of sample idx (includes idx).
        distances = np.linalg.norm(X - X[idx], axis=1)
        return np.flatnonzero(distances <= eps)

    cluster_id = 0
    for i in range(n_samples):
        if visited[i]:
            continue
        visited[i] = True
        neighbors = _region_query(i)
        if neighbors.size < min_points:
            continue  # noise for now; may later be claimed as a border point
        # Grow a new cluster from this core point via breadth-first expansion.
        labels[i] = cluster_id
        seeds = list(neighbors)
        pos = 0
        while pos < len(seeds):
            j = seeds[pos]
            pos += 1
            if not visited[j]:
                visited[j] = True
                j_neighbors = _region_query(j)
                if j_neighbors.size >= min_points:
                    seeds.extend(j_neighbors)
            if labels[j] == -1:
                labels[j] = cluster_id
        if verbose:
            print('cluster %d grown from sample %d' % (cluster_id, i))
        cluster_id += 1
    return labels
# The code below is completed for you.
# You may modify it as long as changes are noted in the comments.
class DBScan(SuperCluster):
    """
    Perform DBSCAN clustering from vector array.

    Parameters
    ----------

    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.

    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.

    csv_path : str, default: None
        Path to file for dataset csv
    keep_dataframe : bool, default: True
        Hold on the results pandas DataFrame generated after each run.
        Also determines whether to use pandas DataFrame as primary internal data state
    keep_X : bool, default: True
        Hold on the results generated after each run in a more generic array-type format
        Use these values if keep_dataframe is False
    verbose: bool, default: False
        Optional log level
    """
    def __init__(self, eps=1, min_points=10, csv_path=None, keep_dataframe=True,
                 keep_X=True,verbose=False):
        self.eps = eps
        self.min_points = min_points
        self.verbose = verbose
        self.csv_path = csv_path
        self.keep_dataframe = keep_dataframe
        self.keep_X = keep_X
    # X is an array of shape (n_samples, n_features)
    def fit(self, X):
        # Run the module-level dbscan() and record labels plus wall time.
        if self.keep_X:
            self.X = X
        start_time = time.time()
        self.labels = dbscan(X, eps=self.eps, min_points = self.min_points,verbose = self.verbose)
        print("DBSCAN finished in %s seconds" % (time.time() - start_time))
        return self
    def show_plot(self):
        # Prefer the cached DataFrame; fall back to the raw X/labels arrays.
        if self.keep_dataframe and hasattr(self, 'DF'):
            _plot_generic_(df=self.DF)
        elif self.keep_X:
            _plot_generic_(X=self.X, labels=self.labels)
        else:
            print('No data to plot.')
    def save_plot(self, name):
        # Same source selection as show_plot, but writes the figure to disk.
        if self.keep_dataframe and hasattr(self, 'DF'):
            _plot_generic_(df=self.DF, save=True, n=name)
        elif self.keep_X:
            _plot_generic_(X=self.X, labels=self.labels, save=True, n=name)
        else:
            print('No data to plot.')
| [
"[email protected]"
] | |
0d72aa6a076fcded774eb89904bc4d34271cc0aa | 799a390a86daabe17f684bc32ea505bfd1f2c112 | /backend/blog_life/wsgi.py | 14b4c893926865a7b6ce0a116a429af175309642 | [] | no_license | AhmedSoliman92/Blog-app-django-react | 23755ed63b36523c45457d05b9caf05718e5d27a | d42ceaf0930cd4cde6e837969ba7acd9caf9cd51 | refs/heads/main | 2023-07-13T13:21:50.465331 | 2021-08-18T01:42:32 | 2021-08-18T01:42:32 | 397,340,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for blog_life project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings, then build the WSGI callable that
# application servers (gunicorn, mod_wsgi, ...) will invoke.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blog_life.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
b49890e253f33a41f74fc1745e616f9449e44f59 | 5f6233ba455bbce5c64a95e0ecffc57631320520 | /gethsemane_project/api/ShipCombat/Models/Program.py | b21c1b380a4a90ca245a4686a65190e903e275f7 | [] | no_license | geier4701/gethsemane | ac90bbe0f4b182630ef22dfa916799ac9c7a5d13 | ef727b44a3c06379a5597accdefbdd6cd6765b78 | refs/heads/master | 2022-04-17T17:24:13.398844 | 2020-04-14T22:01:01 | 2020-04-14T22:01:01 | 165,969,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from typing import List
from api.ShipCombat.Models.Subroutines.Subroutine import Subroutine
class Program:
    """A named ship-combat program: an ordered collection of subroutines."""
    # Database identifier of the program.
    program_id: int
    # Human-readable program name.
    name: str
    # Subroutines executed by this program.
    subroutines: List[Subroutine]
    def __init__(self, program_id: int, name: str, subroutines: List[Subroutine]):
        self.program_id = program_id
        self.name = name
        self.subroutines = subroutines
| [
"[email protected]"
] | |
8624e95de67d60c847a94f25c5a35512db761bc5 | 16cec5e43f02906e14d98d4f53f59a4b4fd74625 | /DL_HW1/Question_1/utils/__init__.py | dcdd9053df3b386a37065832a92c7671be514f18 | [] | no_license | EthanLiao/Deep_Learning_NCTU_Homework | 150e70ca6ad61f789bb7cfb23584be94f4236d4c | a3e075dbd797d6965c1bd01c7e54855c563032d6 | refs/heads/master | 2022-11-12T19:24:08.702634 | 2020-07-02T03:30:51 | 2020-07-02T03:30:51 | 264,079,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | # from .utils import *
| [
"[email protected]"
] | |
4091e72a1bcb8211316043f7716ec35cd20b96ca | 48184fbfce5221022760ea5cf10978a4c79f0fe3 | /firmware/spiders/trendnet.py | bbc543a8d8b03b3132ded03a36ba3ce3bebc243b | [
"MIT"
] | permissive | MikimotoH/scraper | 7c1b7bd2322d4d3ff1ad7eeb6143f55442028471 | 375e41be2f7d17f7b56b38b7a6324f1c9eb7b331 | refs/heads/master | 2020-04-05T23:32:55.055613 | 2016-09-24T23:46:32 | 2016-09-24T23:46:32 | 60,673,858 | 2 | 0 | null | 2016-06-08T06:23:49 | 2016-06-08T06:23:48 | null | UTF-8 | Python | false | false | 2,452 | py | from scrapy import Spider
from scrapy.http import Request
from firmware.items import FirmwareImage
from firmware.loader import FirmwareLoader
import urlparse
class TrendnetSpider(Spider):
    """Scrapy spider collecting firmware images from trendnet.com.

    Crawl flow: support index -> per-product page -> "Downloads" tab ->
    firmware table rows. (Python 2 era code: uses the urlparse module.)
    """
    name = "trendnet"
    allowed_domains = ["trendnet.com"]
    start_urls = ["http://www.trendnet.com/support/"]
    def parse(self, response):
        # Each <option> in the product selector links to one product page.
        for entry in response.xpath("//select[@id='SUBTYPE_ID']/option"):
            if entry.xpath(".//text()"):
                text = entry.xpath(".//text()").extract()[0]
                href = entry.xpath("./@value").extract()[0]
                yield Request(
                    url=urlparse.urljoin(response.url, href),
                    meta={"product": text},
                    headers={"Referer": response.url},
                    callback=self.parse_product)
    def parse_product(self, response):
        # Follow only the "Downloads" tab of the product page.
        for tab in response.xpath("//ul[@class='etabs']//a"):
            text = tab.xpath(".//text()").extract()[0]
            href = tab.xpath("./@href").extract()[0]
            if "downloads" in text.lower():
                yield Request(
                    url=urlparse.urljoin(response.url, href),
                    meta={"product": response.meta["product"]},
                    headers={"Referer": response.url},
                    callback=self.parse_download)
    def parse_download(self, response):
        # Scrape description/date/version from each firmware row.
        for entry in response.xpath("//div[@class='downloadtable']"):
            text = entry.xpath(".//text()").extract()
            if "firmware" in " ".join(text).lower():
                text = entry.xpath(
                    ".//li[@class='maindescription' and position() = 1]//text()").extract()
                date = entry.xpath(
                    ".//li[@class='maindescription' and position() = 2]//text()").extract()
                # The download URL is buried in an onclick handler; the extra
                # query string auto-confirms the licence prompt.
                href = entry.xpath(".//li[@class='maindescription']//a/@onclick").extract()[
                    0].split('\'')[1] + "&button=Continue+with+Download&Continue=yes"
                item = FirmwareLoader(
                    item=FirmwareImage(), response=response, date_fmt=["%m/%d/%Y"])
                item.add_value("url", href)
                item.add_value("product", response.meta["product"])
                item.add_value("date", item.find_date(date))
                item.add_value("version", FirmwareLoader.find_version(text))
                item.add_value("vendor", self.name)
                yield item.load_item()
| [
"[email protected]"
] | |
0b74b6f496b04b66b37c76c57d882b70ad8e56bc | c7ca9a72d051f9e24302d8b327e6f6962814951b | /16/part1.py | 115e2c16e6fd26ac47eea1646e1c9709b8814ea9 | [] | no_license | imba28/advent-of-code-2020 | 96dd83d4df145b65d31f4239bffa61b07e2c01c5 | 26e79ae6d1b523902e1a1e2a444acd0bc94f8158 | refs/heads/master | 2023-02-13T22:55:07.601221 | 2021-01-14T14:54:41 | 2021-01-14T14:54:49 | 319,702,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | import re
def _parse_int_list(string):
return [int(v) for v in string.split(",")]
def other_tickets_state(line, data):
if line == "\n":
return None
if "nearby tickets" not in line:
if "other_tickets" not in data:
data["other_tickets"] = []
data["other_tickets"].append(_parse_int_list(line))
return other_tickets_state
def my_tickets_state(line, data):
if line == "\n":
return other_tickets_state
if "your ticket" not in line:
assert "my_ticke" not in data
data["my_ticket"] =_parse_int_list(line)
return my_tickets_state
def rules_state(line, data):
    """Parser state: read "field: a-b or c-d" rules until a blank line."""
    if line == "\n":
        return my_tickets_state
    rules = data.setdefault('rules', {})
    match = re.match(r"^([\w\s]+):\s([\d\-\d]+)\D+([\d\-\d]+)$", line)
    if match:
        rule_name, *range_specs = match.groups()
        assert rule_name not in rules
        rules[rule_name] = [
            [int(bound) for bound in spec.split("-")] for spec in range_specs
        ]
    return rules_state
def get_data(filePath):
    # Drive the small state machine over the input file: each state
    # consumes one line and returns the next state (None means done).
    with open(filePath, "r") as f:
        state = rules_state
        data = {}
        for line in f.readlines():
            state = state(line, data)
            if state == None:
                return data
        return data
def get_invalid_numbers(rules, tickets):
    """Return every ticket value that satisfies no rule range, in order seen."""
    all_ranges = [rng for ranges in rules.values() for rng in ranges]

    def _matches_any(number):
        return any(low <= number <= high for low, high in all_ranges)

    return [
        number
        for ticket in tickets
        for number in ticket
        if not _matches_any(number)
    ]
if __name__ == "__main__":
data = get_data("16/input")
invalid_numbers = get_invalid_numbers(data["rules"], data["other_tickets"])
print(sum(invalid_numbers)) | [
"[email protected]"
] | |
5e47459e9d1072ac065a1fa1ea1624a8399ebe4c | cec26f63986ea948315bdc4120aabdcbe0665cf1 | /eval/collabwebdb_data/plot_scripts2/helper.py | 8ce76938bc2f4a23ddac282104828213fd70ca37 | [] | no_license | ryscheng/CollaborativeWebDB | a3c22ef31191928d0444337a3259db104521f05a | 429f4e924efd872bda252ef4871526acaf47b64b | refs/heads/master | 2021-01-20T06:54:56.444976 | 2012-12-13T00:46:04 | 2012-12-13T00:46:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | #!/usr/bin/python
import sqlite3
import sys
import os
import errno
import random
import gzip
import resource
import logging
import logging.handlers
import time
from optparse import OptionParser
import subprocess, threading
import re
class Point(object):
    # A (time, datum) sample parsed from the stats dump.
    def __init__(self,time,d):
        self.time = time
        self.d = d
# Open the stats dump (argv[1]) and the two output files for server (argv[2])
# and client (argv[3]) load series.
input = open(sys.argv[1], "r")
line = input.readline()
serv_output = open(sys.argv[2], "w")
client_output = open(sys.argv[3], "w")
# The single input line concatenates three JSON-ish sections; slice them
# apart by the ": {" ... "}," delimiters: cpu, server stats, client stats.
idx_start_cpu = line.index(": {")
str_start_cpu = line[idx_start_cpu+3:]
idx_end_cpu = str_start_cpu.index("},")
str_cpu = str_start_cpu[:idx_end_cpu]
line = str_start_cpu[idx_end_cpu+3:]
idx_start_servstats = line.index(": {")
str_start_servstats = line[idx_start_servstats+3:]
idx_end_servstats = str_start_servstats.index("},")
str_servstats = str_start_servstats[:idx_end_servstats]
line = str_start_servstats[idx_end_servstats+3:]
idx_start_clientstats = line.index("counts\": {")
str_start_clientstats = line[idx_start_clientstats+10:]
idx_end_clientstats = str_start_clientstats.index("},")
str_clientstats = str_start_clientstats[:idx_end_clientstats]
# Parse the server series: each entry is "time": load; track the earliest
# timestamp so output times are relative to the start of the run.
serv_points = str_servstats.split(", ")
min_time = 100000000000000
servLoad = []
for p in serv_points:
    time_qt = p.split(":")[0]
    time = float(time_qt.strip("\""))
    if time < min_time:
        min_time = time
    load = p.split(":")[1]
    serv_load = Point(time,load)
    servLoad.append(serv_load)
servLoad.sort(key=lambda x: x.time)
for p in servLoad:
    serv_output.write(str(p.time-min_time) + " " + str(p.d) + "\n")
# Same parsing for the client series (duplicated structure).
client_points = str_clientstats.split(", ")
min_time = 100000000000000
clientLoad = []
for p in client_points:
    time_qt = p.split(":")[0]
    time = float(time_qt.strip("\""))
    if time < min_time:
        min_time = time
    load = p.split(":")[1]
    client_load = Point(time,load)
    clientLoad.append(client_load)
clientLoad.sort(key=lambda x: x.time)
for p in clientLoad:
    client_output.write(str(p.time-min_time) + " " + str(p.d) + "\n")
| [
"[email protected]"
] | |
3a01fd00648abf1614985d8888185c470bb3ef9f | 9a5bded09adcc62b5ffeef6e6f9b4a7a8072741a | /Archive/Insurance_Amount.py | c03119510bde0cb7559b94c04996f3d068141bf0 | [] | no_license | njpayne/pythagoras | 81983f928a77b2be8ac2f59b058b6429a3c789bc | 925bb5c635c096a0ced4a378d9f3f042bc491249 | refs/heads/master | 2020-07-26T21:12:22.700774 | 2015-08-04T05:10:38 | 2015-08-04T05:10:38 | 12,442,527 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | # Many financial experts advise that property owners should insure their homes
# or buildings for at least 80 percent of the amount that it would cost to replace the structure.
# Write a program that asks the user to enter the replacement cost of a building and then
# displays the minimum amount of insurance he or she should buy for the property
import locale
locale.setlocale( locale.LC_ALL, '' )
# Get the relevant values from the user
replacement = float(input("Please enter the projected replacement cost of the building that you want to insure: "))
# Define the function
def insurance(replacement):
    """Print and return the recommended minimum coverage (80% of replacement cost).

    Also returns the computed minimum (backward compatible: the original
    returned None, which no caller used) so the value can be reused/tested.
    """
    minimum = replacement * 0.8
    print("Based on our calculations, the minimum amount of insurance that you should buy for your property is",
          locale.currency(minimum, grouping=True ))
    print("Would you like to purchase insurance now?")
    return minimum
# Call the main function
insurance(replacement)
| [
"[email protected]"
] | |
c25f6e781b6f88fd2a0ce9847c95d3bfeddf9394 | 102599c40f41f0c2b440c1923b4855b86cd1fac5 | /python/models/lookup.py | d7c29f685518bf548ff9a3bd766626a575b3f441 | [
"MIT"
] | permissive | project-anuvaad/anuvaad-corpus | b9c3390773736fe350ada3ad1a80a53530d1f6df | abf0ef866eb4cc52a84b4e05daaac211d7ca2333 | refs/heads/master | 2022-12-14T19:47:46.264884 | 2020-11-23T14:08:58 | 2020-11-23T14:08:58 | 197,180,339 | 2 | 2 | MIT | 2022-12-10T06:03:09 | 2019-07-16T11:23:26 | JavaScript | UTF-8 | Python | false | false | 120 | py | from mongoengine import *
class Lookup(Document):
    """MongoEngine document mapping a display text to a stored value."""
    # Text shown to the user; mandatory.
    text = StringField(required=True)
    # Backing value; optional.
    value = StringField()
| [
"[email protected]"
] | |
53e6112d9de22126e23635425ff166548b6a3860 | dc3e0f7e75cb8bbcf6b261a607353c7036297304 | /test_autolens/plot/inversions/interferometer/voronoi.py | a60dec2df8f865a29acd45204c9a540951dc4e7d | [
"MIT"
] | permissive | a-mere-peasant/PyAutoLens | 38db2e83ba0136fe6bfc0efa404bfafb7f6379e6 | 462634ff57afde86531146a51019b47c40f10874 | refs/heads/master | 2021-03-05T04:12:39.010444 | 2021-01-08T15:46:16 | 2021-01-08T15:46:16 | 246,093,699 | 0 | 0 | MIT | 2020-03-09T17:00:55 | 2020-03-09T17:00:54 | null | UTF-8 | Python | false | false | 2,094 | py | import autolens as al
import autolens.plot as aplt
import numpy as np
from test_autolens.simulators.interferometer import instrument_util
interferometer = instrument_util.load_test_interferometer(
dataset_name="mass_sie__source_sersic", instrument="sma"
)
# aplt.Interferometer.visibilities(interferometer=interferometer)
# aplt.Interferometer.uv_wavelengths(interferometer=interferometer)
lens_galaxy = al.Galaxy(
redshift=0.5,
mass=al.mp.EllipticalIsothermal(
centre=(0.0, 0.0), einstein_radius=1.6, elliptical_comps=(0.17647, 0.0)
),
)
# source_galaxy = al.Galaxy(
# redshift=1.0,
# light=al.lp.EllipticalSersic(
# centre=(0.0, 0.0),
# elliptical_comps=(-0.055555, 0.096225),
# intensity=0.4,
# effective_radius=0.5,
# sersic_index=1.0,
# ),
# )
source_galaxy = al.Galaxy(
redshift=1.0,
pixelization=al.pix.VoronoiMagnification(shape=(20, 20)),
regularization=al.reg.Constant(coefficient=1.0),
)
real_space_shape = 256
real_space_shape_2d = (real_space_shape, real_space_shape)
real_space_pixels = real_space_shape_2d[0] * real_space_shape_2d[1]
real_space_pixel_scales = 0.05
mask = al.Mask2D.circular(
shape_2d=real_space_shape_2d,
pixel_scales=real_space_pixel_scales,
sub_size=1,
radius=3.0,
)
masked_interferometer = al.MaskedInterferometer(
interferometer=interferometer,
real_space_mask=mask,
visibilities_mask=np.full(
fill_value=False, shape=interferometer.visibilities.shape
),
transformer_class=al.TransformerNUFFT,
)
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
inversion = tracer.inversion_interferometer_from_grid_and_data(
grid=masked_interferometer.grid,
visibilities=masked_interferometer.visibilities,
noise_map=masked_interferometer.noise_map,
transformer=masked_interferometer.transformer,
settings_inversion=al.SettingsInversion(use_linear_operators=True),
)
aplt.Inversion.reconstruction(inversion=inversion)
| [
"[email protected]"
] | |
325606a2957c215ef5d7c2f31f0727e4e12f5933 | d837ec03d56083939f2ff6740bfc51dd6009b243 | /nilesh/django-proj/projects/gharkakhana/gharkakhana/settings.py | edaedaf016cdcd793a468bb524ffe98211e5fe5b | [] | no_license | NilatGitHub/MyProject | 8c5822269813358ad0a73afbc809b29bafc127a4 | 8c43bcebcb7521fea348382b6fffbebccbc9e34d | refs/heads/master | 2022-01-18T06:05:59.452276 | 2019-08-01T10:27:03 | 2019-08-01T10:27:03 | 197,936,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,565 | py | """
Django settings for gharkakhana project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
STATIC_DIR = os.path.join(BASE_DIR, 'static')

# SECURITY WARNING: keep the secret key used in production secret!
# Read from the environment when available; the insecure literal remains
# only as a development fallback so existing setups keep working.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    '=18&x!rmc&-&li2yy6wzqx6qe#e#+#s7!+p=%ow)yh32^s_3=d')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'travello.apps.TravelloConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'gharkakhana.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATES_DIR,],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'gharkakhana.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# NOTE(review): credentials are hardcoded; consider environment variables
# for deployments beyond local development.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'gharkakhana',
        'USER': 'postgres',
        'PASSWORD': 'newpassword',
        'HOST': 'localhost'
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

STATICFILES_DIRS = [
    STATIC_DIR,
]

STATIC_ROOT = os.path.join(BASE_DIR, 'assets')

MEDIA_URL = '/media/'

MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
"[email protected]"
] | |
24129b8d14849fee1e68ba2eb13dd7713bd0d837 | d0c35162ca6f4725c61239b859e888d4d79e9a9c | /mysite/polls/views.py | c046aa8bd0c9858d3dd7933931d03b2c99fabdad | [] | no_license | sunshine2323/django_polls | b129ba2fd3a206e17c5bdfbc5c8a5bcb749c7366 | f91adf02c77c66d6c5f04c410df3aa5e25dbe12d | refs/heads/master | 2021-05-08T13:20:11.450883 | 2018-02-02T17:17:58 | 2018-02-02T17:17:58 | 120,009,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Choice, Question
class IndexView(generic.ListView):
    """Landing page: lists the five most recently published questions."""

    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five published questions, excluding any whose
        pub_date lies in the future."""
        published = Question.objects.filter(pub_date__lte=timezone.now())
        return published.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Voting form for a single question."""

    model = Question
    template_name = 'polls/detail.html'

    def get_queryset(self):
        """Exclude questions that aren't published yet."""
        now = timezone.now()
        return Question.objects.filter(pub_date__lte=now)
class ResultsView(generic.DetailView):
    # Read-only results page for one question; the template renders the
    # vote tallies of its choices.
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record one vote for the POSTed choice of question *question_id*."""
    question = get_object_or_404(Question, pk=question_id)
    try:
        choice_pk = request.POST['choice']
        selected_choice = question.choice_set.get(pk=choice_pk)
    except (KeyError, Choice.DoesNotExist):
        # No (or invalid) choice submitted: redisplay the voting form.
        context = {
            'question': question,
            'error_message': "You didn't select a choice.",
        }
        return render(request, 'polls/detail.html', context)

    selected_choice.votes += 1
    selected_choice.save()
    # Redirect after a successful POST so a reload or Back-button press
    # cannot submit the vote twice.
    return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
"[email protected]"
] | |
0ab5952214283207e1593af4f8dd520d2147644a | 77253c9c186ff13427b341b984dfcd56db0d1085 | /psdaq/psdaq/pyxpm/pvctrls.py | 2cdbbf704fbb7fd5f4589ebddfcb75d28d21308f | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bopopescu/lcls2 | a196aa7659c13ba6462d5dd631f7dfe3f2d8b5ec | 66194e5f73d4d675706fbad632e86da1527173eb | refs/heads/master | 2022-11-20T07:37:00.347602 | 2020-07-21T07:18:12 | 2020-07-21T07:18:12 | 281,561,069 | 0 | 0 | null | 2020-07-22T03:07:40 | 2020-07-22T03:07:39 | null | UTF-8 | Python | false | false | 20,173 | py | import sys
import time
import traceback
import threading
import socket
import struct
from p4p.nt import NTScalar
from p4p.server.thread import SharedPV
from psdaq.pyxpm.pvseq import *
from psdaq.pyxpm.pvhandler import *
# db support
from psdaq.configdb.typed_json import cdict
import psdaq.configdb.configdb as cdb
from psdaq.configdb.get_config import get_config_with_params
# Module-wide state shared by the handlers below; assigned in PVCtrls.__init__.
provider = None  # p4p provider that all SharedPVs are registered with
lock = None      # serializes indexed register access (select index + read/write)
countdn = 0      # countdown ticks until settings are archived to configdb
countrst = 60    # countdown restart value applied after each archived change
class TransitionId(object):
    # DAQ transition identifier codes (used as message headers).
    Clear = 0
    Config = 2
    Enable = 4
    Disable = 5
class RateSel(object):
    # L0Select trigger-source modes (values stored in the L0Select PV).
    FixedRate = 0
    ACRate = 1
    EventCode = 2
    Sequence = 3
def pipelinedepth_from_delay(value):
    """Encode an L0 delay into the pipelineDepth register format.

    The low 16 bits of *value* are the delay; the register packs the
    clock count (delay*200, truncated to 16 bits) in the low half and
    the raw delay in the high half.
    """
    delay = value & 0xffff
    clocks = (delay * 200) & 0xffff
    return clocks | (delay << 16)
def forceUpdate(reg):
    # Issue a read so the register's shadow value is refreshed before a
    # subsequent read-modify-write; the returned value is discarded.
    reg.get()
def retry(cmd, pv, value):
    """Invoke ``cmd(pv, value)``, retrying up to 5 times on failure.

    KeyboardInterrupt always propagates so an operator can stop the
    process.  Any other exception is printed and the call retried; after
    5 consecutive failures the error is dropped (best-effort semantics:
    a PV handler must not take the server down).
    """
    for _attempt in range(5):
        try:
            cmd(pv, value)
            break
        except KeyboardInterrupt:
            raise
        except BaseException:
            # BaseException mirrors the original bare ``except:`` (which
            # also swallowed SystemExit), without sys.exc_info() juggling.
            traceback.print_exc()
            print('Caught exception... retrying.')
def retry_wlock(cmd, pv, value):
    """Run :func:`retry` while holding the module-wide register ``lock``.

    The release is in a ``finally`` so an exception escaping ``retry``
    (e.g. a KeyboardInterrupt it re-raises) cannot leave the lock held,
    which would deadlock every other indexed-register handler.
    """
    lock.acquire()
    try:
        retry(cmd, pv, value)
    finally:
        lock.release()
class RetryWLock(object):
    # Callable adapter: wraps *func* so invoking the instance runs it
    # through retry_wlock (retries under the module-wide lock).
    def __init__(self, func):
        self.func = func
    def __call__(self, pv, value):
        retry_wlock(self.func, pv, value)
class RegH(PVHandler):
    """PV handler that posts the written value to a single register.

    With ``archive=True`` each successful write restarts the configdb
    archive countdown (module global ``countdn``).
    """
    def __init__(self, valreg, archive=False):
        super(RegH,self).__init__(self.handle)
        self._valreg = valreg
        self._archive = archive
    def cmd(self,pv,value):
        # Post the PV value straight to the register.
        self._valreg.post(value)
    def handle(self, pv, value):
        global countdn
        retry(self.cmd,pv,value)
        if self._archive:
            countdn = countrst
class IdxRegH(PVHandler):
    """PV handler for an indexed register bank: select the index, then write.

    The select-then-write pair must be atomic with respect to other
    indexed accesses, hence retry_wlock (module-wide lock).
    """
    def __init__(self, valreg, idxreg, idx):
        super(IdxRegH,self).__init__(self.handle)
        self._valreg = valreg
        self._idxreg = idxreg
        self._idx = idx
    def cmd(self,pv,value):
        self._idxreg.post(self._idx)
        # Refresh the shadow value after switching the index.
        forceUpdate(self._valreg)
        self._valreg.post(value)
    def handle(self, pv, value):
        retry_wlock(self.cmd,pv,value)
class L0DelayH(IdxRegH):
    """Indexed-register handler for L0Delay.

    Converts the PV value through pipelinedepth_from_delay before writing
    and restarts the configdb archive countdown on every write.
    """
    def __init__(self, valreg, idxreg, idx):
        super(L0DelayH,self).__init__(valreg, idxreg, idx)
    def handle(self, pv, value):
        global countdn
        retry_wlock(self.cmd,pv,pipelinedepth_from_delay(value))
        countdn = countrst
class CmdH(PVHandler):
    """PV handler that fires a zero-argument command on any non-zero write."""
    def __init__(self, cmd):
        super(CmdH,self).__init__(self.handle)
        self._cmd = cmd
    def cmd(self,pv,value):
        # The PV value is only a trigger; the command takes no arguments.
        self._cmd()
    def handle(self, pv, value):
        if value:
            retry(self.cmd,pv,value)
class IdxCmdH(PVHandler):
    """PV handler that selects an index register, then fires a command.

    Writing a non-zero value sets ``idxcmd`` to ``idx`` and invokes
    ``cmd()``; the pair runs under the module-wide lock because the
    index/command registers are shared hardware state.
    """
    def __init__(self, cmd, idxcmd, idx):
        super(IdxCmdH,self).__init__(self.handle)
        self._cmd = cmd        # zero-argument command to fire
        self._idxcmd = idxcmd  # index register selecting the target
        self._idx = idx        # index value to select
    def cmd(self,pv,value):
        self._idxcmd.set(self._idx)
        self._cmd()
    def handle(self, pv, value):
        if value:
            # BUG FIX: was ``retry_wlock(self.idxcmd, ...)`` -- the class
            # has no ``idxcmd`` attribute (only ``cmd`` and ``_idxcmd``),
            # so every write raised AttributeError.  ``self.cmd`` matches
            # the pattern used by CmdH.handle.
            retry_wlock(self.cmd,pv,value)
class RegArrayH(PVHandler):
    """PV handler that posts the written value to every register in a bank.

    With ``archive=True`` each write restarts the configdb archive
    countdown (module global ``countdn``).
    """
    def __init__(self, valreg, archive=False):
        super(RegArrayH,self).__init__(self.handle)
        self._valreg = valreg
        self._archive = archive
    def cmd(self,pv,val):
        global countdn
        # Broadcast the same value to all registers of the bank.
        for reg in self._valreg.values():
            reg.post(val)
        if self._archive:
            countdn = countrst
    def handle(self, pv, val):
        retry(self.cmd,pv,val)
class LinkCtrls(object):
    """PVs for one downstream link: group mask, loopback, Tx/Rx resets,
    and a ring-buffer dump of the link's receive data.

    All per-link registers are accessed through the shared ``app.link``
    index register, so the IdxRegH handlers serialize on the module lock.
    """
    def __init__(self, name, xpm, link):
        self._link = link
        self._ringb = xpm.AxiLiteRingBuffer
        self._app = xpm.XpmApp
        app = self._app
        linkreg = app.link
        # Indexed per-link register PV named '<label><link>'.
        def addPV(label, reg, init=0):
            pv = SharedPV(initial=NTScalar('I').wrap(init),
                          handler=IdxRegH(reg,linkreg,link))
            provider.add(name+':'+label+'%d'%link,pv)
            reg.set(init) # initialization
            return pv
        linkreg.set(link) # initialization
        app.fullEn.set(1)
        # self._pv_linkRxTimeout = addPV('LinkRxTimeout',app.rxTimeout, init=186)
        self._pv_linkGroupMask = addPV('LinkGroupMask',app.fullMask)
        # self._pv_linkTrigSrc = addPV('LinkTrigSrc' ,app.trigSrc)
        self._pv_linkLoopback = addPV('LinkLoopback' ,app.loopback)
        self._pv_linkTxReset = addPV('TxLinkReset' ,app.txReset)
        self._pv_linkRxReset = addPV('RxLinkReset' ,app.rxReset)
        print('LinkCtrls.init link {} fullEn {}'
              .format(link, app.fullEn.get()))
        # Free-standing PV (custom handler, not register-backed).
        def addPV(label, init, handler):
            pv = SharedPV(initial=NTScalar('I').wrap(init),
                          handler=handler)
            provider.add(name+':'+label+'%d'%link,pv)
            return pv
        self._pv_linkRxDump = addPV('RxLinkDump',0,handler=PVHandler(self.dump))
    def _dump(self, pv, val):
        # Point the debug mux at this link, clear and briefly run the
        # ring buffer to capture a snapshot of receive data.
        self._app.linkDebug.set(self._link)
        self._ringb.clear.set(1)
        time.sleep(1e-6)
        self._ringb.clear.set(0)
        self._ringb.start.set(1)
        time.sleep(100e-6)
        self._ringb.start.set(0)
    def dump(self,pv,val):
        # Capture under the module lock, then print the buffer contents.
        if val:
            retry_wlock(self._dump,pv,val)
            self._ringb.Dump()
class CuGenCtrls(object):
    """PVs for the Cu-timing generator (XTPG): event delay, beam code,
    fiducial-error clear, and the Cu timing input crossbar selection.

    Initial values come from ``dbinit`` (a configdb dictionary with an
    'XTPG' section) when present and complete; otherwise defaults apply.
    """
    def __init__(self, name, xpm, dbinit=None):
        # Scalar-register PV; archive=True marks writes that should
        # trigger a configdb save.
        def addPV(label, init, reg, archive):
            pv = SharedPV(initial=NTScalar('I').wrap(init),
                          handler=RegH(reg, archive=archive))
            provider.add(name+':'+label, pv)
            reg.set(init)
            return pv
        try:
            cuDelay = dbinit['XTPG']['CuDelay']
            # BUG FIX: was ``dbInit[...]`` (undefined name), so a valid
            # dbinit always raised here and CuBeamCode/CuInput silently
            # fell back to the defaults below.
            cuBeamCode = dbinit['XTPG']['CuBeamCode']
            cuInput = dbinit['XTPG']['CuInput']
        except (TypeError, KeyError):
            # dbinit is None or lacks the XTPG entries: use the defaults.
            cuDelay = 200*800
            cuBeamCode = 140
            cuInput = 1
        self._pv_cuDelay = addPV('CuDelay', cuDelay, xpm.CuGenerator.cuDelay, True)
        self._pv_cuBeamCode = addPV('CuBeamCode', cuBeamCode, xpm.CuGenerator.cuBeamCode, True)
        self._pv_clearErr = addPV('ClearErr', 0, xpm.CuGenerator.cuFiducialIntvErr, False)
        # Register-array PV: the same value is posted to every element
        # (renamed from a second shadowing ``addPV`` for clarity).
        def addArrayPV(label, init, reg, archive):
            pv = SharedPV(initial=NTScalar('I').wrap(init),
                          handler=RegArrayH(reg, archive=archive))
            provider.add(name+':'+label, pv)
            for r in reg.values():
                r.set(init)
            return pv
        self._pv_cuInput = addArrayPV('CuInput', cuInput, xpm.AxiSy56040.OutputConfig, True)
class PVInhibit(object):
    """PVs for one deadtime-inhibit engine of a readout group.

    Each setter selects the group via ``app.partition`` first, so the
    handlers run through RetryWLock (module-wide lock).
    """
    def __init__(self, name, app, inh, group, idx):
        self._group = group
        self._idx = idx
        self._app = app
        self._inh = inh
        # PV named '<label><idx>'; the handler is invoked once with the
        # initial value to program the hardware.
        def addPV(label,cmd,init):
            pv = SharedPV(initial=NTScalar('I').wrap(init),
                          handler=PVHandler(cmd))
            provider.add(name+':'+label+'%d'%idx,pv)
            cmd(pv,init) # initialize
            return pv
        self._pv_InhibitInt = addPV('InhInterval', RetryWLock(self.inhibitIntv), 10)
        self._pv_InhibitLim = addPV('InhLimit' , RetryWLock(self.inhibitLim ), 4)
        self._pv_InhibitEna = addPV('InhEnable' , RetryWLock(self.inhibitEna ), 0)
    def inhibitIntv(self, pv, value):
        # Interval register holds (value - 1); PV is in natural units.
        self._app.partition.set(self._group)
        forceUpdate(self._inh.intv)
        self._inh.intv.set(value-1)
    def inhibitLim(self, pv, value):
        # Max-accepts register holds (value - 1).
        self._app.partition.set(self._group)
        forceUpdate(self._inh.maxAcc)
        self._inh.maxAcc.set(value-1)
    def inhibitEna(self, pv, value):
        self._app.partition.set(self._group)
        forceUpdate(self._inh.inhEn)
        self._inh.inhEn.set(value)
class GroupSetup(object):
    """PVs for one readout group: L0 trigger selection (rate/destination),
    run/master control, step control, transition-message fields and the
    four deadtime inhibits.

    Register writes select the group via ``app.partition`` first, so all
    mutating handlers serialize on the module-wide ``lock``.
    """
    def __init__(self, name, app, group, stats, init=None):
        self._group = group
        self._app = app
        self._stats = stats
        # Selection PVs: the handler only records the value; ``put``
        # recomputes the combined rate/destination registers from the
        # current values of all selection PVs.
        def addPV(label,cmd,init=0,set=False):
            pv = SharedPV(initial=NTScalar('I').wrap(init),
                          handler=PVHandler(cmd))
            provider.add(name+':'+label,pv)
            if set:
                cmd(pv,init)
            return pv
        self._pv_L0Select = addPV('L0Select' ,self.put)
        self._pv_FixedRate = addPV('L0Select_FixedRate' ,self.put)
        self._pv_ACRate = addPV('L0Select_ACRate' ,self.put)
        self._pv_ACTimeslot = addPV('L0Select_ACTimeslot' ,self.put)
        self._pv_EventCode = addPV('L0Select_EventCode' ,self.put)
        self._pv_Sequence = addPV('L0Select_Sequence' ,self.put)
        self._pv_SeqBit = addPV('L0Select_SeqBit' ,self.put)
        self._pv_DstMode = addPV('DstSelect' ,self.put, 1)
        self._pv_DstMask = addPV('DstSelect_Mask' ,self.put)
        self._pv_Run = addPV('Run' ,self.run , set=True)
        self._pv_Master = addPV('Master' ,self.master, set=True)
        # StepDone is status-only: posted from stepDone(), never written.
        self._pv_StepDone = SharedPV(initial=NTScalar('I').wrap(0), handler=DefaultPVHandler())
        provider.add(name+':StepDone', self._pv_StepDone)
        self._pv_StepGroups = addPV('StepGroups' ,self.stepGroups, set=True)
        self._pv_StepEnd = addPV('StepEnd' ,self.stepEnd , set=True)
        # Transition-message registers, indexed by group.
        def addPV(label,reg,init=0,set=False):
            pv = SharedPV(initial=NTScalar('I').wrap(init),
                          handler=IdxRegH(reg,self._app.partition,group))
            provider.add(name+':'+label,pv)
            if set:
                self._app.partition.set(group)
                reg.set(init)
            return pv
        self._pv_MsgHeader = addPV('MsgHeader' , app.msgHdr , 0, set=True)
        self._pv_MsgPayload = addPV('MsgPayload', app.msgPayl, 0, set=True)
        # L0Delay is written through the pipelinedepth_from_delay encoding
        # and each write restarts the configdb archive countdown.
        def addPV(label,reg,init=0,set=False):
            pv = SharedPV(initial=NTScalar('I').wrap(init),
                          handler=L0DelayH(reg,self._app.partition,group))
            provider.add(name+':'+label,pv)
            if set:
                self._app.partition.set(group)
                reg.set(pipelinedepth_from_delay(init))
            return pv
        self._pv_L0Delay = addPV('L0Delay' , app.pipelineDepth, init['L0Delay'][group] if init else 90, set=True)
        # initialize
        self.put(None,None)
        # Plain storage PV with no hardware side effects.
        def addPV(label):
            pv = SharedPV(initial=NTScalar('I').wrap(0),
                          handler=DefaultPVHandler())
            provider.add(name+':'+label,pv)
            return pv
        self._pv_MsgConfigKey = addPV('MsgConfigKey')
        # One PVInhibit per deadtime-inhibit engine of this group.
        self._inhibits = []
        self._inhibits.append(PVInhibit(name, app, app.inh_0, group, 0))
        self._inhibits.append(PVInhibit(name, app, app.inh_1, group, 1))
        self._inhibits.append(PVInhibit(name, app, app.inh_2, group, 2))
        self._inhibits.append(PVInhibit(name, app, app.inh_3, group, 3))
    def dump(self):
        """Print a debug snapshot of the group's selection registers."""
        print('Group: {} Master: {} RateSel: {:x} DestSel: {:x} Ena: {}'
              .format(self._group, self._app.l0Master.get(), self._app.l0RateSel.get(), self._app.l0DestSel.get(), self._app.l0En.get()))
    def setFixedRate(self):
        # Selector class 0: fixed-rate marker in the low 4 bits.
        rateVal = (0<<14) | (self._pv_FixedRate.current()['value']&0xf)
        self._app.l0RateSel.set(rateVal)
    def setACRate(self):
        # Selector class 1: AC rate (3 bits) plus timeslot mask (6 bits).
        acRate = self._pv_ACRate .current()['value']
        acTS = self._pv_ACTimeslot.current()['value']
        rateVal = (1<<14) | ((acTS&0x3f)<<3) | (acRate&0x7)
        self._app.l0RateSel.set(rateVal)
    def setEventCode(self):
        # Selector class 2: event code split into word/bit fields.
        code = self._pv_EventCode.current()['value']
        rateVal = (2<<14) | ((code&0xf0)<<4) | (code&0xf)
        self._app.l0RateSel.set(rateVal)
    def setSequence(self):
        # NOTE(review): also selector class 2 -- sequences appear to share
        # the event-code selector encoding; confirm against firmware docs.
        seqIdx = self._pv_Sequence.current()['value']
        seqBit = self._pv_SeqBit .current()['value']
        rateVal = (2<<14) | ((seqIdx&0x3f)<<8) | (seqBit&0xf)
        self._app.l0RateSel.set(rateVal)
    def setDestn(self):
        # Destination select: mode bit in bit 15, destination mask below.
        mode = self._pv_DstMode.current()['value']
        mask = self._pv_DstMask.current()['value']
        destVal = (mode<<15) | (mask&0x7fff)
        self._app.l0DestSel.set(destVal)
    def master(self, pv, val):
        """Claim (val!=0) or release (val==0) mastership of this group.

        Releasing also disables L0 and forces the Run PV back to 0.
        """
        lock.acquire()
        self._app.partition.set(self._group)
        forceUpdate(self._app.l0Master)
        if val==0:
            self._app.l0Master.set(0)
            self._app.l0En .set(0)
            self._stats._master = 0
            curr = self._pv_Run.current()
            curr['value'] = 0
            self._pv_Run.post(curr)
        else:
            self._app.l0Master.set(1)
            self._stats._master = 1
        lock.release()
    def put(self, pv, val):
        """Recompute rate and destination selection from the current PVs."""
        lock.acquire()
        self._app.partition.set(self._group)
        forceUpdate(self._app.l0RateSel)
        mode = self._pv_L0Select.current()['value']
        if mode == RateSel.FixedRate:
            self.setFixedRate()
        elif mode == RateSel.ACRate:
            self.setACRate()
        elif mode == RateSel.EventCode:
            self.setEventCode()
        elif mode == RateSel.Sequence:
            self.setSequence()
        else:
            print('L0Select mode invalid {}'.format(mode))
        forceUpdate(self._app.l0DestSel)
        self.setDestn()
        self.dump()
        lock.release()
    def stepGroups(self, pv, val):
        # Group mask participating in step scans for this group.
        getattr(self._app,'stepGroup%i'%self._group).set(val)
    def stepEnd(self, pv, val):
        # Arm a new step: clear the done flag, then set the end count.
        self.stepDone(False)
        getattr(self._app,'stepEnd%i'%self._group).set(val)
    def stepDone(self, val):
        """Post the StepDone status PV with a current timestamp."""
        value = self._pv_StepDone.current()
        value['value'] = 1 if val else 0
        # Split nanoseconds-since-epoch into (seconds, nanoseconds).
        timev = divmod(float(time.time_ns()), 1.0e9)
        value['timeStamp.secondsPastEpoch'], value['timeStamp.nanoseconds'] = timev
        self._pv_StepDone.post(value)
    def run(self, pv, val):
        """Enable/disable L0 trigger generation for this group."""
        lock.acquire()
        self._app.partition.set(self._group)
        forceUpdate(self._app.l0En)
        enable = 1 if val else 0
        self._app.l0En.set(enable)
        self.dump()
        lock.release()
class GroupCtrls(object):
    """Top-level group PVs (reset/enable/disable/message-insert masks)
    plus one GroupSetup per readout group (8 groups)."""
    def __init__(self, name, app, stats, init=None):
        # Register-backed PV; writes post straight through RegH.
        def addPV(label,reg):
            pv = SharedPV(initial=NTScalar('I').wrap(0),
                          handler=RegH(reg))
            provider.add(name+':'+label,pv)
            return pv
        self._pv_l0Reset = addPV('GroupL0Reset' ,app.groupL0Reset)
        self._pv_l0Enable = addPV('GroupL0Enable' ,app.groupL0Enable)
        self._pv_l0Disable = addPV('GroupL0Disable',app.groupL0Disable)
        self._pv_MsgInsert = addPV('GroupMsgInsert',app.groupMsgInsert)
        self._groups = []
        for i in range(8):
            self._groups.append(GroupSetup(name+':PART:%d'%i, app, i, stats[i], init=init['PART'] if init else None))
        # This is necessary in XTPG
        app.groupL0Reset.set(0xff)
        app.groupL0Reset.set(0)
class PVCtrls(object):
    """Top-level PV controller for one XPM: builds every control PV,
    programs the transmit link ID, and starts the UDP notify thread.

    ``p`` is the p4p provider and ``m`` the register lock; both are
    published into this module's globals for the handlers above.
    """
    def __init__(self, p, m, name=None, ip=None, xpm=None, stats=None, db=None, cuInit=False):
        global provider
        provider = p
        global lock
        lock = m
        # Assign transmit link ID
        # (0xff0 prefix | xpm number | last two IP octets' low bits).
        ip_comp = ip.split('.')
        xpm_num = name.rsplit(':',1)[1]
        v = 0xff00000 | ((int(xpm_num)&0xf)<<16) | ((int(ip_comp[2])&0xf)<<12) | ((int(ip_comp[3])&0xff)<< 4)
        xpm.XpmApp.paddr.set(v)
        print('Set PADDR to 0x{:x}'.format(v))
        self._name = name
        self._ip = ip
        self._xpm = xpm
        self._db = db
        init = None
        # Best-effort: read the stored configuration from configdb.
        # db format: "url,name,instrument,alias" (comma separated).
        try:
            db_url, db_name, db_instrument, db_alias = db.split(',',4)
            print('db {:}'.format(db))
            print('url {:} name {:} instr {:} alias {:}'.format(db_url,db_name,db_instrument,db_alias))
            print('device {:}'.format(name))
            init = get_config_with_params(db_url, db_instrument, db_name, db_alias, name)
            print('cfg {:}'.format(init))
        except:
            print('Caught exception reading configdb [{:}]'.format(db))
        # Per-link controls for all 24 downstream links.
        self._links = []
        for i in range(24):
            self._links.append(LinkCtrls(name, xpm, i))
        app = xpm.XpmApp
        # AMC PLL dump commands (one PV per AMC).
        self._pv_amcDumpPLL = []
        for i in range(2):
            pv = SharedPV(initial=NTScalar('I').wrap(0),
                          handler=IdxCmdH(app.amcPLL.Dump,app.amc,i))
            provider.add(name+':DumpPll%d'%i,pv)
            self._pv_amcDumpPLL.append(pv)
        self._cu = CuGenCtrls(name+':XTPG', xpm, dbinit=init)
        self._group = GroupCtrls(name, app, stats, init=init)
        # The following section will throw an exception if the CuInput PV is not set properly
        if not cuInit:
            self._seq = PVSeq(provider, name+':SEQENG:0', ip, Engine(0, xpm.SeqEng_0))
            self._pv_dumpSeq = SharedPV(initial=NTScalar('I').wrap(0),
                                        handler=CmdH(self._seq._eng.dump))
            provider.add(name+':DumpSeq',self._pv_dumpSeq)
        # Timing receiver reset commands.
        self._pv_usRxReset = SharedPV(initial=NTScalar('I').wrap(0),
                                      handler=CmdH(xpm.UsTiming.C_RxReset))
        provider.add(name+':Us:RxReset',self._pv_usRxReset)
        self._pv_cuRxReset = SharedPV(initial=NTScalar('I').wrap(0),
                                      handler=CmdH(xpm.CuTiming.C_RxReset))
        provider.add(name+':Cu:RxReset',self._pv_cuRxReset)
        # Background thread listening for sequence/step UDP notifications.
        self._thread = threading.Thread(target=self.notify)
        self._thread.start()
    def update(self):
        """Periodic tick: when the archive countdown expires, snapshot the
        current settings into configdb (XTPG values + per-group L0Delay)."""
        global countdn
        # check for config save
        if countdn > 0:
            countdn -= 1
            if countdn == 0 and self._db:
                # save config
                print('Updating {}'.format(self._db))
                db_url, db_name, db_instrument, db_alias = self._db.split(',',4)
                mycdb = cdb.configdb(db_url, db_instrument, True, db_name)
                mycdb.add_device_config('xpm')
                top = cdict()
                top.setInfo('xpm', self._name, None, 'serial1234', 'No comment')
                top.setAlg('config', [0,0,0])
                # Hold the register lock while reading the hardware state.
                lock.acquire()
                top.set('XTPG.CuDelay' , self._xpm.CuGenerator.cuDelay.get() , 'UINT32')
                top.set('XTPG.CuBeamCode', self._xpm.CuGenerator.cuBeamCode.get() , 'UINT8')
                top.set('XTPG.CuInput' , self._xpm.AxiSy56040.OutputConfig[0].get(), 'UINT8')
                v = []
                for i in range(8):
                    self._xpm.XpmApp.partition.set(i)
                    v.append( self._xpm.XpmApp.l0Delay.get() )
                top.set('PART.L0Delay', v, 'UINT32')
                lock.release()
                if not db_alias in mycdb.get_aliases():
                    mycdb.add_alias(db_alias)
                try:
                    mycdb.modify_device(db_alias, top)
                except:
                    pass
    def notify(self):
        """Listen (forever) on a UDP socket for firmware notifications:
        src 0 = sequence checkpoint addresses, src 1 = step-end events."""
        client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        client.connect((self._ip,8197))
        # Initial registration datagram.
        msg = b'\x00\x00\x00\x00'
        client.send(msg)
        while True:
            msg = client.recv(256)
            # Messages are a stream of little 16-bit words.
            s = struct.Struct('H')
            siter = s.iter_unpack(msg)
            src = next(siter)[0]
            print('src {:x}'.format(src))
            if src==0: # sequence notify message
                # Bit mask of engines, followed by one address per set bit.
                mask = next(siter)[0]
                i=0
                while mask!=0:
                    if mask&1:
                        addr = next(siter)[0]
                        print('addr[{}] {:x}'.format(i,addr))
                        if i<1:
                            self._seq.checkPoint(addr)
                    i += 1
                    mask = mask>>1
            elif src==1: # step end message
                group = next(siter)[0]
                self._group._groups[group].stepDone(True)
| [
"[email protected]"
] | |
0555f1450060ed1571993e78c9aaa4d36810230d | 8c91c587431f1bdd635056f9c400a85f651036d4 | /myProject/settings.py | c578570541199cfbffd273935a34e23fdd1e1e9c | [] | no_license | sajjad0057/Musician-Hub--2 | fc0208ca6adac18b00c46cb2ec6b991dce154c05 | 18d5156c02a57f369c5dec3a41c45660cc78608b | refs/heads/master | 2023-01-14T16:51:45.996964 | 2020-11-26T20:31:14 | 2020-11-26T20:31:14 | 316,331,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,342 | py | """
Django settings for myProject project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
STATIC_DIR = os.path.join(BASE_DIR, 'static')

# SECURITY WARNING: keep the secret key used in production secret!
# Read from the environment when available; the insecure literal remains
# only as a development fallback so existing setups keep working.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    'f(zu!ciksc!%8tqt7t2)ebicai(77$9!*tfbzv=h0#rgi718ir')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'myApp_1',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'myProject.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATES_DIR,],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'myProject.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# NOTE(review): credentials are hardcoded; consider environment variables
# for deployments beyond local development.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'my_first_django',
        'USER': 'root',
        'PASSWORD': '',
        'HOST': 'localhost',
        'PORT': '3306',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'

STATICFILES_DIRS = [
    STATIC_DIR,
]
| [
"[email protected]"
] | |
5a07a92a98ffc1815112392dbca952a1a506dd2a | 59de7788673ade984b9c9fbc33664a7cbdba67d3 | /res/scripts/client/tutorial/control/summary.py | e547a4605339a90053a71c308da810726c10252a | [] | no_license | webiumsk/WOT-0.9.15-CT | 3fa24ab37a6c91b7073034afb2f355efa5b7fe36 | fbd194fbaa6bdece51c7a68fc35bbb5257948341 | refs/heads/master | 2020-12-24T21:27:23.175774 | 2016-05-01T13:47:44 | 2016-05-01T13:47:44 | 57,600,180 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 3,291 | py | # 2016.05.01 15:25:27 Střední Evropa (letní čas)
# Embedded file name: scripts/client/tutorial/control/summary.py
from tutorial.control.functional import FunctionalVarSet
from tutorial.logger import LOG_ERROR, LOG_DEBUG
class _Flag(object):
def __init__(self, name, active, store = True):
super(_Flag, self).__init__()
self.name = name
self.active = active
self.store = store
def __repr__(self):
return '{0:>s}: {1!r:s}'.format(self.name, self.active)
def isActive(self):
return self.active
def activate(self):
self.active = True
def deactivate(self):
self.active = False
class FlagSummary(object):
    """Holds the tutorial's named boolean flags.

    Flags listed at construction (or present in ``initial``) are
    persistent (``store=True``) and are included in :meth:`getDict`;
    flags first touched at runtime via activate/deactivate are
    transient (``store=False``) and are not persisted.
    """

    def __init__(self, flagNames, initial = None):
        super(FlagSummary, self).__init__()
        if flagNames is None:
            flagNames = []
        if initial is None:
            initial = {}
        self.__flags = {}
        initialGetter = initial.get
        # BUG FIX: the original called ``flagNames.extend(initial.keys())``,
        # mutating the caller's list as a construction side effect.  Build
        # the union of names locally instead.
        for name in set(flagNames) | set(initial.keys()):
            self.__flags[name] = _Flag(name, initialGetter(name, False))

    def __repr__(self):
        return 'FlagSummary({0:s}): {1!r:s}'.format(hex(id(self)), self.__flags.values())

    def deactivateFlag(self, flagName):
        """Clear the flag; unknown names become transient (non-stored) flags."""
        LOG_DEBUG('Deactivate flag', flagName)
        if flagName in self.__flags:
            self.__flags[flagName].deactivate()
        else:
            self.__flags[flagName] = _Flag(flagName, False, store=False)

    def activateFlag(self, flagName):
        """Set the flag; unknown names become transient (non-stored) flags."""
        LOG_DEBUG('Activate flag: ', flagName)
        if flagName in self.__flags:
            self.__flags[flagName].activate()
        else:
            self.__flags[flagName] = _Flag(flagName, True, store=False)

    def isActiveFlag(self, flagName):
        """True if the flag exists and is active; unknown flags are inactive."""
        activeFlag = False
        if flagName in self.__flags:
            activeFlag = self.__flags[flagName].isActive()
        return activeFlag

    def addFlag(self, flagName):
        """Register a persistent flag (initially inactive) if not present."""
        if flagName not in self.__flags:
            self.__flags[flagName] = _Flag(flagName, False)

    def getDict(self):
        """Return {name: active} for the persistent (store=True) flags only."""
        filtered = filter(lambda flag: flag.store, self.__flags.itervalues())
        return dict(map(lambda flag: (flag.name, flag.active), filtered))
class VarSummary(object):
    """Read-only access to tutorial var-sets plus a mutable runtime store.

    Var-set-backed IDs always win over runtime values and cannot be
    overwritten via :meth:`set`.
    """

    def __init__(self, varSets, runtime = None):
        super(VarSummary, self).__init__()
        self.__varSets = {}
        if varSets:
            for varSet in varSets:
                self.__varSets[varSet.getID()] = FunctionalVarSet(varSet)
        self.__runtime = runtime or {}

    def get(self, varID, default = None):
        """Resolve *varID*: var-sets first, then the runtime store."""
        if varID in self.__varSets:
            return self.__varSets[varID].getFirstActual()
        return self.__runtime.get(varID, default)

    def set(self, varID, value):
        """Store a runtime value; var-set-backed IDs are immutable."""
        if varID in self.__varSets:
            LOG_ERROR('Var {0:>s} in not mutable.'.format(varID))
        else:
            LOG_DEBUG('Set var {0:>s}'.format(varID), value)
            self.__runtime[varID] = value
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\tutorial\control\summary.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:25:27 Střední Evropa (letní čas)
| [
"[email protected]"
] | |
c495d41b0499d638fd6f1b6b31a43fd9aeb1a2ee | 80a3d98eae1d755d6914b5cbde63fd10f5cc2046 | /autox/autox_competition/feature_engineer/__init__.py | 54c1f55b4e4c6a8fda4cc01b63e5c77a85f20ed1 | [
"Apache-2.0"
] | permissive | 4paradigm/AutoX | efda57b51b586209e1d58e1dab7d0797083aadc5 | 7eab9f4744329a225ff01bb5ec360c4662e1e52e | refs/heads/master | 2023-05-24T00:53:37.109036 | 2023-02-14T14:21:50 | 2023-02-14T14:21:50 | 388,068,949 | 752 | 162 | Apache-2.0 | 2022-07-12T08:28:09 | 2021-07-21T09:45:41 | Jupyter Notebook | UTF-8 | Python | false | false | 782 | py | from .fe_count import FeatureCount
from .fe_stat import FeatureStat
from .fe_rank import FeatureRank
from .fe_target_encoding import FeatureTargetEncoding
from .fe_dimension_reduction import FeatureDimensionReduction
from .fe_nlp import FeatureNlp
from .fe_denoising_autoencoder import FeatureDenoisingAutoencoder
from .fe_shift import FeatureShift
from .fe_diff import FeatureDiff
from .fe_cumsum import FeatureCumsum
from .fe_time import FeatureTime
from .fe_one2M import FeatureOne2M
from .fe_shift_ts import FeatureShiftTS
from .fe_rolling_stat_ts import FeatureRollingStatTS
from .fe_exp_weighted_mean import FeatureExpWeightedMean
from .fe_image2vec import fe_ima2vec
from .fe_gbdt import FeatureGbdt
from .fe_one2many import FeatureOne2Many
from .fe_cross import FeatureCross | [
"[email protected]"
] | |
7617720cd8cab93ae19e77a855aafa05ba4ce40a | fdfb0ef1757c899cbde0aa5fb8dccf17f54451ef | /utility.py | 2ee5720509ff9b9f722596cb54ac071a89107926 | [] | no_license | schinwald/sorter-demo | 0e5e3d9cdda00d0651aa7f5ed43eb196365a6806 | 62bba2d6a06e12c7103f30e370b509f4f5cc2bea | refs/heads/master | 2020-03-20T19:16:07.513132 | 2018-08-30T00:57:09 | 2018-08-30T00:57:09 | 137,629,660 | 2 | 0 | null | 2018-08-30T00:57:09 | 2018-06-17T04:48:19 | Python | UTF-8 | Python | false | false | 211 | py | # Description:
# Common functions needed by sorting algorithms
#
# Author: James Schinwald
# swaps to elements in an array
def swap(array, a, b):
temp = array[a]
array[a] = array[b]
array[b] = temp | [
"[email protected]"
] | |
524aeb845c4efb6e5784fd628ae8a40f7f099a43 | 012e922a357ff60abe0dd4c1e13df2820886a7a4 | /backend/csi1_28203/settings.py | 8423d39667e59e29b80186f265f97bf7558e63e7 | [] | no_license | crowdbotics-apps/csi1-28203 | 73bc3dfca64f5517d3d9a57dd32c052ac2657394 | 5e9b9f92c97e17e6176ae8a5650470d63d19bede | refs/heads/master | 2023-06-09T16:50:24.413408 | 2021-06-24T13:18:03 | 2021-06-24T13:18:03 | 379,931,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,096 | py | """
Django settings for csi1_28203 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'csi1_28203.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'csi1_28203.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
9dc24245f7328179558ddd6f30113d92a1289fae | 1d9e6b08510537976552dde2e5f3bd6befea4a55 | /coding_test/venv/bin/pip | f1b5fc84f83d24b895956a9bcdbe89f5c4a6c673 | [] | no_license | dan8919/python | 3232a30882207cf04bb1064b5697afbe2a7e1704 | c07d52de49050b20f6d4ee58cff8923518938c73 | refs/heads/master | 2022-12-13T18:29:30.185429 | 2020-09-12T02:47:05 | 2020-09-12T02:47:05 | 294,595,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | #!/Users/cuixindan/PycharmProjects/coding_test/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
b81d61e852105a9811e5bc89828c9dd62c172ace | b47ee8aed3c76d1a9222f3563bb6592f7e9d6eed | /selection_sort.py | a7f50605d53fe7ed772fc1503fed20d98c834a7c | [] | no_license | ranjithkumar121/Python | 3697554852335d9af8683a554a3f36e146ab109e | 5af562edca1a792141974d26962415533c12045e | refs/heads/main | 2023-06-19T00:18:08.191503 | 2021-07-12T07:05:18 | 2021-07-12T07:05:18 | 347,558,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | def selection_sort(lst):
for i in range(len(lst)):
min_value=i
for j in range(i+1,len(lst)):
if lst[min_value]>lst[j]:
min_value=j
lst[i],lst[min_value]=lst[min_value],lst[i]
return lst
lst=[4,8,1,8,10,2,5]
print(selection_sort(lst)) | [
"[email protected]"
] | |
6526c5b02b70dc5bb52035135f937754e2b56c49 | 59d832eac4947e88a2bc8e821d6544ef8ad76df3 | /spark.py | f1234d57295e0a657eec815fd158578434e44011 | [] | no_license | jianyigengge/spark | f4834910da556900f171d6acc7b550e33018a687 | 74e682f71214789e33c54f2f8929ef7a45fa313e | refs/heads/master | 2020-04-15T10:22:26.704860 | 2019-01-08T07:49:47 | 2019-01-08T07:49:47 | 164,593,650 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,306 | py | # -*- coding: utf-8 -*-
#导入需要的模块
from pyspark.mllib.classification import LogisticRegressionWithSGD
from pyspark.mllib.regression import LabeledPoint
from pyspark import SparkContext
from pyspark.mllib.classification import SVMWithSGD
from pyspark.mllib.tree import RandomForest
#关闭上一个进程
try:
sc.stop()
except:
pass
#创建一个sc
sc=SparkContext('local[1]','lr')
#将数据转为labelpoint格式
def create_label_point(line):
line=line.strip('\n').split(',')
return LabeledPoint(float(line[len(line)-1]), [float(x) for x in line[1:len(line)-1]])
#分别对训练集、测试集数据进行转换
data_train=sc.textFile("file:///home/student/Workshop+xiaojianyi/data_train.csv").map(create_label_point)
data_test=sc.textFile("file:///home/student/Workshop+xiaojianyi/data_test.csv").map(create_label_point)
#逻辑回归拟合训练集,并且计算测试集准确率
lrm = LogisticRegressionWithSGD.train(data_train)
pred1=lrm.predict(data_test.map(lambda x:x.features))
label_and_pred1=data_test.map(lambda x: x.label).zip(pred1)
lrm_acc=label_and_pred1.filter(lambda(x,y):x==y).count()/float(data_test.count())
#print("lrm_acc:%f"%lrm_acc)
#SVM拟合训练集,并且计算测试集准确率
svm=SVMWithSGD.train(data_train)
pred2=svm.predict(data_test.map(lambda x:x.features))
label_and_pred2=data_test.map(lambda x: x.label).zip(pred2)
svm_acc=label_and_pred2.filter(lambda(x,y):x==y).count()/float(data_test.count())
#print("svm_acc:%f"%svm_acc)
#随机森林拟合训练集,并且计算测试集准确率
rf = RandomForest.trainClassifier(data_train, numClasses=2,
categoricalFeaturesInfo={},
numTrees=200,
featureSubsetStrategy="auto",
impurity="gini",
maxDepth=10,
maxBins=32,
seed=12)
pred3=rf.predict(data_test.map(lambda x:x.features))
label_and_pred3=data_test.map(lambda x: x.label).zip(pred3)
rf_acc=label_and_pred3.filter(lambda(x,y):x==y).count()/float(data_test.count())
#分别打印出三种模型在测试集上的准确率(lr,svm,randomforest)
print("lrm_acc:%f"%lrm_acc)
print("svm_acc:%f"%svm_acc)
print("rf_acc:%f"%rf_acc)
| [
"[email protected]"
] | |
578c807f059ce2cad367335f76393e947a627dec | e5f5fa6d8e148cf515dc33d59244c8bcdae368f0 | /students/thorn/lesson02/series.py | 8078d3c6f6be633ea66d527d52191165fcc95040 | [] | no_license | nataliewang919/Self_Paced-Online | 88de884d803ef04ca4608e856bbc600e9d852d2b | e2e20d49782610016a918dc50cf4eceb9000b8a0 | refs/heads/master | 2021-04-30T00:29:22.086971 | 2018-02-15T03:49:02 | 2018-02-15T03:49:02 | 121,460,168 | 0 | 0 | null | 2018-02-14T02:03:23 | 2018-02-14T02:03:23 | null | UTF-8 | Python | false | false | 2,249 | py | '''
Thomas Horn
The following definitions are used to computer the Fibonacci and Lucas Series
throughout this program.
Fibonacci - starts with 0, 1 and sums the previous 2 numbers
Lucas - starts with 1, 2 and sums the previous 2 numbers
'''
def fibonacci(n):
""" Returns the nth value in the fibonacci sequence. """
if n <= 1:
return 0
else:
first, second = 0, 1
while n > 0:
# first becomes the second number, and the second both nums added.
first, second = second, (first+second)
n -= 1
return first
def lucas(n):
""" Returns the nth value in the lucas sequence. Identical to fibonacci
except for different starting values.
"""
if n < 1:
return 0
else:
# This seems backwards but seems to works in this order.
first, second = 2, 1
while n > 0:
first, second = second, (first+second)
n -= 1
return first
def sum_series(n, first=0, second=1):
""" sum_series defaults to the fibonacci sequence if only n is assigned a
parameter.
If first and second are given values, they will produce the nth value
in a similar fashion to the lucas series beginning with the two optional
parameters.
"""
if n < 1:
return 0
else:
while n > 0:
first, second = second, (first+second)
n -= 1
return first
if __name__ == "__main__":
# Test of fibonacci values.
print("Fibonacci: ")
# Correct value.
try:
assert (fibonacci(10) == 55)
except AssertionError:
print("Incorrect value.")
# Incorrect value.
try:
assert (fibonacci(8) == 55)
except AssertionError:
print("Incorrect value.")
# Test of lucas values.
print("Lucas: ")
# Correct values.
assert (lucas(7) == 29)
assert (lucas(10) == 123)
# Incorrect value.
try:
assert (lucas(10) == 29)
except AssertionError:
print("Incorrect value.")
# Test to ensure sum_series is returning the correct value for the correct
# sequence.
# Fibonacci value.
assert (sum_series(10) == 55)
assert (sum_series(10, 2, 1 == 123))
| [
"[email protected]"
] | |
a6437ae02fe2a9cc66bc0a3160a7b099ef4f553c | 110d53cf8971d9b475b1eb1c184f80b0936d7633 | /myexcel/storetomysql.py | 2f321e723cecd13eb1aa6ffbcbd449a73c501060 | [] | no_license | gitghought/python1 | e8553ae0f0d3a84ce2f1283d23db9863bb79457f | b9a86696679dca4f8532fe2e1ed1fe1606fdbb80 | refs/heads/master | 2020-04-23T21:27:42.548827 | 2019-04-18T09:15:07 | 2019-04-18T09:15:07 | 171,471,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,695 | py | import pymysql
from myexcel.MyInclude import MyInclude
import json
class MySQLConnection () :
#服务器端的ip地址
serverIP = "localhost"
#数据库用户名
username = "root"
#数据库密码
password = ""
#数据库名
databaseName = "studentscore"
#在读取数据库中的图片时,使用的地址
mysqlPrePath = serverIP
keyFiveTimesClass = "sclass"
keyFiveTimesTherySname = "sname"
keyFiveTimesTheryFiveDayTheryAvg= "fiveDayTheryAvg"
keyFiveTimesTheryTenDayTheryAvg= "tenDayTheryAvg"
keyFiveTimesTheryFiftenDayTheryAvg= "fiftenDayTheryAvg"
keyFiveTimesTheryMonthTheryAvg= "monthTheryAvg"
keyFiveTimesSkillSname = "sname"
keyFiveTimesSkillFiveDaySkillAvg= "fiveDaySkillAvg"
keyFiveTimesSkillTenDaySkillAvg= "tenDaySkillAvg"
keyFiveTimesSkillFifteenDaySkillAvg= "fiftenDaySkillAvg"
keyFiveTimesSkillMonthSkillAvg= "monthSkillAvg"
class ScoreTheryObject() :
sname = ""
srank = ""
sclass = ""
sFiveDayTheryAvg = ""
sTenDayTheryAvg = ""
sFiftenDayTheryAvg = ""
sMonthTheryAvg = ""
class ScoreToMySQL :
keySname = "sname"
keySkillAvgList = [ "fiveDaySkillAvg", "tenDaySkillAvg", "fiftenDaySkillAvg", "monthSkillAvg" ]
keyTheryAvgList = [ "fiveDayTheryAvg", "tenDayTheryAvg", "fiftenDayTheryAvg", "monthTheryAvg" , "srank"]
keyEveryDayTheryList = [
"day0",
"day1",
"day2",
"day3",
"day4",
"day5",
"day6",
"day7",
"day8",
"day9",
"day10",
"day11",
"day12",
"day13",
"day14",
"day15",
"day16",
"day17",
"day18",
"day19",
"day20",
"day21"
]
def myConnect(this):
this.db = pymysql.connect(
MySQLConnection.mysqlPrePath, # 数据库对应的地址
MySQLConnection.username, # 数据库登录用户名
MySQLConnection.password, # 数据库登录密码
MySQLConnection.databaseName # 数据库名称
)
return this.db
#返回一个游标
def getCursor(this):
#如果在此处发生异常,说明当前对象还没有调用myConnect方法
try:
this.cursor = this.db.cursor()
except AttributeError as e :
this.myConnect()
else :
pass
finally :
this.cursor = this.db.cursor()
return this.cursor
# 用于将班级信息组成一个元组
def getClassTuple(sclass):
return ("sclass", sclass)
#将封装好数据的字典添加到数据库种
# 字典种必须封装表中需要的所有数据
def insertIntoFiveTimesSkillDict(this, dict):
try :
#执行向数据库插入数据的操作
this.cursor.execute("insert into "
"fiveTimesSkill ("
"sname,"
"fiveDaySkillAvg,"
"tenDaySkillAvg,"
"fiftenDaySkillAvg,"
"monthSkillAvg"
") "
"values("
"'{sname}',"
"'{fiveDaySkillAvg}',"
"'{tenDaySkillAvg}',"
"'{fiftenDaySkillAvg}',"
"'{monthSkillAvg}'"
")".format(
sname=dict[keyFiveTimesSkillSname],
fiveDaySkillAvg=dict[keyFiveTimesSkillFiveDaySkillAvg],
tenDaySkillAvg=dict[keyFiveTimesSkillTenDaySkillAvg],
fiftenDaySkillAvg=dict[keyFiveTimesSkillFifteenDaySkillAvg],
monthSkillAvg=dict[keyFiveTimesSkillMonthSkillAvg]
))
except AttributeError as e:
this.cursor = this.getCursor()
else :
pass
this.db.commit()
finally:
# 执行向数据库插入数据的操作
this.cursor.execute("insert into "
"fiveTimesSkill ("
"sname,"
"fiveDaySkillAvg,"
"tenDaySkillAvg,"
"fiftenDaySkillAvg,"
"monthSkillAvg"
") "
"values("
"'{sname}',"
"'{fiveDaySkillAvg}',"
"'{tenDaySkillAvg}',"
"'{fiftenDaySkillAvg}',"
"'{monthSkillAvg}'"
")".format(
sname=dict[keyFiveTimesSkillSname],
fiveDaySkillAvg=dict[keyFiveTimesSkillFiveDaySkillAvg],
tenDaySkillAvg=dict[keyFiveTimesSkillTenDaySkillAvg],
fiftenDaySkillAvg=dict[keyFiveTimesSkillFifteenDaySkillAvg],
monthSkillAvg=dict[keyFiveTimesSkillMonthSkillAvg]
))
this.db.commit()
#将封装好数据的字典添加到数据库种
# 字典种必须封装表中需要的所有数据
def insertIntoFiveTimesTheryDict(this, dict):
this.cursor = this.getCursor()
try:
#执行向数据库插入数据的操作
this.cursor.execute("insert into "
"fiveTimesThery ("
"sname,"
"fiveDayTheryAvg,"
"tenDayTheryAvg,"
"fiftenDayTheryAvg,"
"monthTheryAvg,"
"sclass"
") "
"values("
"'{sname}',"
"'{fiveDayTheryAvg}',"
"'{tenDayTheryAvg}',"
"'{fiftenDayTheryAvg}',"
"'{monthTheryAvg}',"
"'{sclass}'"
")".format(
sname=dict[keyFiveTimesTherySname],
fiveDayTheryAvg=dict[keyFiveTimesTheryFiveDayTheryAvg],
tenDayTheryAvg=dict[keyFiveTimesTheryTenDayTheryAvg],
fiftenDayTheryAvg=dict[keyFiveTimesTheryFiftenDayTheryAvg],
monthTheryAvg=dict[keyFiveTimesTheryMonthTheryAvg],
sclass=dict[keyFiveTimesClass]
))
except pymysql.err.IntegrityError as e :
pass
this.db.commit()
# 将封装好数据的字典添加到数据库种
# 字典种必须封装表中需要的所有数据
def insertIntoEveryDayTheryDict(this, dict):
# 执行向数据库插入数据的操作
this.cursor = this.getCursor()
# 执行向数据库插入数据的操作
this.cursor.execute("insert into "
"everydaythery ("
"sname,"
"day1 ,"
"day2 ,"
"day3 ,"
"day4 ,"
"day5 ,"
"day6 ,"
"day7 ,"
"day8 ,"
"day9 ,"
"day10,"
"day11,"
"day12,"
"day13,"
"day14,"
"day15,"
"day16,"
"day17,"
"day18,"
"day19,"
"day20,"
"day21"
") "
"values("
"'{sname}',"
"'{day1}',"
"'{day2}',"
"'{day3}',"
"'{day4}',"
"'{day5}',"
"'{day6}',"
"'{day7}',"
"'{day8}',"
"'{day9}',"
"'{day10}',"
"'{day11}',"
"'{day12}',"
"'{day13}',"
"'{day14}',"
"'{day15}',"
"'{day16}',"
"'{day17}',"
"'{day18}',"
"'{day19}',"
"'{day20}',"
"'{day21}'"
")".format(
sname=dict[this.keySname],
day1 =dict[this.keyEveryDayTheryList[1]],
day2 =dict[this.keyEveryDayTheryList[2]],
day3 =dict[this.keyEveryDayTheryList[3]],
day4 =dict[this.keyEveryDayTheryList[4]],
day5 =dict[this.keyEveryDayTheryList[5]],
day6 =dict[this.keyEveryDayTheryList[6]],
day7 =dict[this.keyEveryDayTheryList[7]],
day8 =dict[this.keyEveryDayTheryList[8]],
day9 =dict[this.keyEveryDayTheryList[9]],
day10=dict[this.keyEveryDayTheryList[10]],
day11=dict[this.keyEveryDayTheryList[11]],
day12=dict[this.keyEveryDayTheryList[12]],
day13=dict[this.keyEveryDayTheryList[13]],
day14=dict[this.keyEveryDayTheryList[14]],
day15=dict[this.keyEveryDayTheryList[15]],
day16=dict[this.keyEveryDayTheryList[16]],
day17=dict[this.keyEveryDayTheryList[17]],
day18=dict[this.keyEveryDayTheryList[18]],
day19=dict[this.keyEveryDayTheryList[19]],
day20=dict[this.keyEveryDayTheryList[20]],
day21=dict[this.keyEveryDayTheryList[21]],
))
this.db.commit()
# 当执行完数据库查询操作后,
# 将查询的数据取出保存到一个对象
def getObjFromCursor (this):
olist = []
rows = this.cursor.fetchall()
for row in rows :
sto = ScoreTheryObject ()
sto.sname = row[1]
sto.sFiveDayTheryAvg = row[2]
sto.sTenDayTheryAvg = row[3]
sto.sFiftenDayTheryAvg = row[4]
sto.sMonthTheryAvg = row[5]
sto.sclass = row[6]
sto.srank = row[7]
olist.append(sto)
return olist
# 当执行完数据库查询操作后,
# 将查询的数据取出保存到字典
# 再将每个字典添加到一个列表
# 最后将列表返回
def __getValuesFromCursor(this):
slist = []
sdict = {}
rows = this.cursor.fetchall()
for row in rows :
avglist = []
#获取四次平均成绩,每组平均成绩存放到一个元组中
for pos in range(4) :
# print(row[2 + pos].rjust(20, " "), end=" ")
mtuple = (this.keyTheryAvgList[pos], row[2+pos])
avglist.append(mtuple)
#将四次均分做成一个字典
avgDict = dict(avglist)
# 将学生姓名与他四次的均分做成一个新的字典
# 是嵌套字典
sdict = {row[1]:avgDict}
# print(sdict)
slist.append(sdict)
return slist
def selectFromFiveTimesTheryWithOrderByInNameList(this,orderby, nameListStr):
this.getCursor()
this.cursor.execute(""
"select c.* from "
" (select f.*, @rank:=@rank + 1 as num "
" from fivetimesthery f , (select @rank:= 0) b "
" order by {orderby}) c "
# "where sname in ('李浩','周帅')".format(orderby = orderby))
"where sname in ({nameListstr})".format(nameListstr = nameListStr, orderby = orderby))
olist = this.getObjFromCursor()
print(olist)
this.db.commit()
return olist
# 从数据库中检索数据
# orderby指定排序的规则
# 该函数返回的是一个由字段做元素的列表
def selectFromFiveTimesThery(this, orderby):
this.getCursor()
this.cursor.execute(
"SELECT * FROM "
"`fiveTimesThery` "
"where sclass = '{sclass}' "
"order by {orderby} "
"desc limit 0,34".format(sclass=MyInclude.scoreExcelPrefix,orderby=orderby)
)
# this.cursor.execute(
# "SELECT * FROM "
# "`fiveTimesThery` "
# "order by {orderby} "
# "desc limit 0,34".format(sclass=MyInclude.scoreExcelPrefix,orderby=orderby)
# )
slist= this.__getValuesFromCursor()
this.db.commit()
# print(slist)
return slist
#执行关闭数据库的操作
# 在完成一个数据库操作后就执行这个函数
def disConnect(this):
this.db.close()
if __name__ == '__main__':
dictScore = {
"sname":"hehehehe",
"day1":"day1",
"day2":"day2",
"day3":"day3",
"day4":"day4",
"day5":"day5",
"day6":"day6",
"day7":"day7",
"day8":"day8",
"day9":"day9",
"day10":"day10",
"day11":"day11",
"day12":"day12",
"day13":"day13",
"day14":"day14",
"day15":"day15",
"day16":"day16",
"day17":"day17",
"day18":"day18",
"day19":"day19",
"day20":"day20",
"day21":"day21"
}
"""
以下是数据库使用的步骤
1、创建数据库对象
2、添加数据库到数据库
3、关闭数据库连接
"""
#创建数据库对象
mstm = ScoreToMySQL()
""""
from fivetimesthery f , (select @rank:= 0) b "
" order by {orderby}) c "
# "where sname in ('李浩','周帅')".
"""
olist = mstm.selectFromFiveTimesTheryWithOrderByInNameList(orderby="fiftenDayTheryAvg", nameListStr="'李浩','周帅'")
# # 将准备的字典数据添加到数据库
# mstm.insertIntoEveryDayTheryDict(dictScore)
# mstm.selectFromFiveTimesThery(mstm.keyTheryAvgList[3])
# mstm.insertIntoFiveTimesSkillDict()
# 关闭本次数据库连接
mstm.disConnect()
| [
"[email protected]"
] | |
b1cc9ddc62e8e64b7ceefb5b598d2ce0edcb0644 | ccb0133cd3dbd2f430a81fb85cf5d186b1cfc426 | /securetrading/abstractstobject.py | 4962b378dbd8e3a854cdc5cd3ebd6266bde50290 | [
"MIT"
] | permissive | SecureTrading/st-python-api | eeec5fdafd962fd6d612296448be61b1a8e196b3 | 812b99e5f8d079c3c5c735b6ee79d406fa4ab15c | refs/heads/master | 2022-11-14T05:59:43.010306 | 2022-10-25T19:48:09 | 2022-10-25T19:48:09 | 56,777,572 | 2 | 1 | MIT | 2019-08-18T13:47:49 | 2016-04-21T13:49:50 | Python | UTF-8 | Python | false | false | 1,445 | py | from __future__ import unicode_literals
import securetrading
class AbstractStObject(dict):
"""The default Object class inherited by all Secure Trading Objects."""
def __init__(self, request_reference=None):
super(AbstractStObject, self).__init__()
def verify(self):
"""This should be overwritten by the child class
if required."""
pass
def update(self, data):
"""Updates the Secure Trading Object with data.
This update method will set the data onto the Secure Trading Object and
will also automatically call validation on some of the values inserted.
Args:
data: A dictionary of values to insert.
Raises:
This method will raise a securetrading.ApiError during the
automatic validation, if the value is invalid.
"""
for key in data:
self.__setitem__(key, data[key])
def __setitem__(self, key, value, use_set_method=True):
debug = "{0} Setting {1}".format(self.get("requestreference"), key)
securetrading.util.logger.debug(debug)
validate_method = "_validate_{0}".format(key)
if hasattr(self, validate_method):
getattr(self, validate_method)(value)
set_method = "_set_{0}".format(key)
if use_set_method and hasattr(self, set_method):
getattr(self, set_method)(value)
else:
super(AbstractStObject, self).__setitem__(key, value)
| [
"[email protected]"
] | |
27384ee911c97c609e72c2b8da5bc8f75cc6ba5f | cf0cd48fad4083cd6b601b5250b884fa77911ec7 | /JavaLexer.py | 4f14750ed85b21dd33a6ae75552c15ccad90bdee | [] | no_license | hatc/C4.5-decision-tree-cpp | 9f1a3aea89aedf503fffd28da1610aaf014a573f | 0ea0f91e79ee56292aa2b93e72ce4ed7faf2e2b8 | refs/heads/master | 2021-01-20T11:06:12.366590 | 2013-01-24T21:29:35 | 2013-01-24T21:29:35 | 7,760,788 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130,341 | py | # $ANTLR 3.4 Java.g 2012-09-20 01:12:57
import sys
from antlr3 import *
from antlr3.compat import set, frozenset
# for convenience in actions
HIDDEN = BaseRecognizer.HIDDEN
# token types
EOF=-1
ABSTRACT=4
AND=5
AND_ASSIGN=6
ANNOTATION_INIT_ARRAY_ELEMENT=7
ANNOTATION_INIT_BLOCK=8
ANNOTATION_INIT_DEFAULT_KEY=9
ANNOTATION_INIT_KEY_LIST=10
ANNOTATION_LIST=11
ANNOTATION_METHOD_DECL=12
ANNOTATION_SCOPE=13
ANNOTATION_TOP_LEVEL_SCOPE=14
ARGUMENT_LIST=15
ARRAY_DECLARATOR=16
ARRAY_DECLARATOR_LIST=17
ARRAY_ELEMENT_ACCESS=18
ARRAY_INITIALIZER=19
ASSERT=20
ASSIGN=21
AT=22
BIT_SHIFT_RIGHT=23
BIT_SHIFT_RIGHT_ASSIGN=24
BLOCK_SCOPE=25
BOOLEAN=26
BREAK=27
BYTE=28
CASE=29
CAST_EXPR=30
CATCH=31
CATCH_CLAUSE_LIST=32
CHAR=33
CHARACTER_LITERAL=34
CLASS=35
CLASS_CONSTRUCTOR_CALL=36
CLASS_INSTANCE_INITIALIZER=37
CLASS_STATIC_INITIALIZER=38
CLASS_TOP_LEVEL_SCOPE=39
COLON=40
COMMA=41
COMMENT=42
CONSTRUCTOR_DECL=43
CONTINUE=44
DEC=45
DECIMAL_LITERAL=46
DEFAULT=47
DIV=48
DIV_ASSIGN=49
DO=50
DOT=51
DOTSTAR=52
DOUBLE=53
ELLIPSIS=54
ELSE=55
ENUM=56
ENUM_TOP_LEVEL_SCOPE=57
EQUAL=58
ESCAPE_SEQUENCE=59
EXPONENT=60
EXPR=61
EXTENDS=62
EXTENDS_BOUND_LIST=63
EXTENDS_CLAUSE=64
FALSE=65
FINAL=66
FINALLY=67
FLOAT=68
FLOATING_POINT_LITERAL=69
FLOAT_TYPE_SUFFIX=70
FOR=71
FORMAL_PARAM_LIST=72
FORMAL_PARAM_STD_DECL=73
FORMAL_PARAM_VARARG_DECL=74
FOR_CONDITION=75
FOR_EACH=76
FOR_INIT=77
FOR_UPDATE=78
FUNCTION_METHOD_DECL=79
GENERIC_TYPE_ARG_LIST=80
GENERIC_TYPE_PARAM_LIST=81
GREATER_OR_EQUAL=82
GREATER_THAN=83
HEX_DIGIT=84
HEX_LITERAL=85
IDENT=86
IF=87
IMPLEMENTS=88
IMPLEMENTS_CLAUSE=89
IMPORT=90
INC=91
INSTANCEOF=92
INT=93
INTEGER_TYPE_SUFFIX=94
INTERFACE=95
INTERFACE_TOP_LEVEL_SCOPE=96
JAVA_ID_PART=97
JAVA_ID_START=98
JAVA_SOURCE=99
LABELED_STATEMENT=100
LBRACK=101
LCURLY=102
LESS_OR_EQUAL=103
LESS_THAN=104
LINE_COMMENT=105
LOCAL_MODIFIER_LIST=106
LOGICAL_AND=107
LOGICAL_NOT=108
LOGICAL_OR=109
LONG=110
LPAREN=111
METHOD_CALL=112
MINUS=113
MINUS_ASSIGN=114
MOD=115
MODIFIER_LIST=116
MOD_ASSIGN=117
NATIVE=118
NEW=119
NOT=120
NOT_EQUAL=121
NULL=122
OCTAL_ESCAPE=123
OCTAL_LITERAL=124
OR=125
OR_ASSIGN=126
PACKAGE=127
PARENTESIZED_EXPR=128
PLUS=129
PLUS_ASSIGN=130
POST_DEC=131
POST_INC=132
PRE_DEC=133
PRE_INC=134
PRIVATE=135
PROTECTED=136
PUBLIC=137
QUALIFIED_TYPE_IDENT=138
QUESTION=139
RBRACK=140
RCURLY=141
RETURN=142
RPAREN=143
SEMI=144
SHIFT_LEFT=145
SHIFT_LEFT_ASSIGN=146
SHIFT_RIGHT=147
SHIFT_RIGHT_ASSIGN=148
SHORT=149
STAR=150
STAR_ASSIGN=151
STATIC=152
STATIC_ARRAY_CREATOR=153
STRICTFP=154
STRING_LITERAL=155
SUPER=156
SUPER_CONSTRUCTOR_CALL=157
SWITCH=158
SWITCH_BLOCK_LABEL_LIST=159
SYNCHRONIZED=160
THIS=161
THIS_CONSTRUCTOR_CALL=162
THROW=163
THROWS=164
THROWS_CLAUSE=165
TRANSIENT=166
TRUE=167
TRY=168
TYPE=169
UNARY_MINUS=170
UNARY_PLUS=171
UNICODE_ESCAPE=172
VAR_DECLARATION=173
VAR_DECLARATOR=174
VAR_DECLARATOR_LIST=175
VOID=176
VOID_METHOD_DECL=177
VOLATILE=178
WHILE=179
WS=180
XOR=181
XOR_ASSIGN=182
class JavaLexer(Lexer):
grammarFileName = "Java.g"
api_version = 1
    def __init__(self, input=None, state=None):
        # Fall back to a fresh shared recognizer state when the caller
        # does not supply one (the usual standalone-lexer case).
        if state is None:
            state = RecognizerSharedState()

        super(JavaLexer, self).__init__(input, state)

        self.delegates = []

        # Wire up DFA #28, the decision automaton ANTLR generated to
        # disambiguate overlapping token rules; its transition tables
        # (DFA28_eot, DFA28_eof, ...) are class attributes defined later
        # in this generated file.
        self.dfa28 = self.DFA28(
            self, 28,
            eot = self.DFA28_eot,
            eof = self.DFA28_eof,
            min = self.DFA28_min,
            max = self.DFA28_max,
            accept = self.DFA28_accept,
            special = self.DFA28_special,
            transition = self.DFA28_transition
            )
# $ANTLR start "ABSTRACT"
def mABSTRACT(self, ):
try:
_type = ABSTRACT
_channel = DEFAULT_CHANNEL
# Java.g:7:10: ( 'abstract' )
# Java.g:7:12: 'abstract'
pass
self.match("abstract")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ABSTRACT"
# $ANTLR start "AND"
def mAND(self, ):
try:
_type = AND
_channel = DEFAULT_CHANNEL
# Java.g:8:5: ( '&' )
# Java.g:8:7: '&'
pass
self.match(38)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "AND"
# $ANTLR start "AND_ASSIGN"
def mAND_ASSIGN(self, ):
try:
_type = AND_ASSIGN
_channel = DEFAULT_CHANNEL
# Java.g:9:12: ( '&=' )
# Java.g:9:14: '&='
pass
self.match("&=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "AND_ASSIGN"
# $ANTLR start "ASSERT"
def mASSERT(self, ):
try:
_type = ASSERT
_channel = DEFAULT_CHANNEL
# Java.g:10:8: ( 'assert' )
# Java.g:10:10: 'assert'
pass
self.match("assert")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ASSERT"
# $ANTLR start "ASSIGN"
def mASSIGN(self, ):
try:
_type = ASSIGN
_channel = DEFAULT_CHANNEL
# Java.g:11:8: ( '=' )
# Java.g:11:10: '='
pass
self.match(61)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ASSIGN"
# $ANTLR start "AT"
def mAT(self, ):
try:
_type = AT
_channel = DEFAULT_CHANNEL
# Java.g:12:4: ( '@' )
# Java.g:12:6: '@'
pass
self.match(64)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "AT"
# $ANTLR start "BIT_SHIFT_RIGHT"
def mBIT_SHIFT_RIGHT(self, ):
try:
_type = BIT_SHIFT_RIGHT
_channel = DEFAULT_CHANNEL
# Java.g:13:17: ( '>>>' )
# Java.g:13:19: '>>>'
pass
self.match(">>>")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "BIT_SHIFT_RIGHT"
# $ANTLR start "BIT_SHIFT_RIGHT_ASSIGN"
def mBIT_SHIFT_RIGHT_ASSIGN(self, ):
try:
_type = BIT_SHIFT_RIGHT_ASSIGN
_channel = DEFAULT_CHANNEL
# Java.g:14:24: ( '>>>=' )
# Java.g:14:26: '>>>='
pass
self.match(">>>=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "BIT_SHIFT_RIGHT_ASSIGN"
# $ANTLR start "BOOLEAN"
def mBOOLEAN(self, ):
try:
_type = BOOLEAN
_channel = DEFAULT_CHANNEL
# Java.g:15:9: ( 'boolean' )
# Java.g:15:11: 'boolean'
pass
self.match("boolean")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "BOOLEAN"
# $ANTLR start "BREAK"
def mBREAK(self, ):
try:
_type = BREAK
_channel = DEFAULT_CHANNEL
# Java.g:16:7: ( 'break' )
# Java.g:16:9: 'break'
pass
self.match("break")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "BREAK"
# $ANTLR start "BYTE"
def mBYTE(self, ):
try:
_type = BYTE
_channel = DEFAULT_CHANNEL
# Java.g:17:6: ( 'byte' )
# Java.g:17:8: 'byte'
pass
self.match("byte")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "BYTE"
# $ANTLR start "CASE"
def mCASE(self, ):
try:
_type = CASE
_channel = DEFAULT_CHANNEL
# Java.g:18:6: ( 'case' )
# Java.g:18:8: 'case'
pass
self.match("case")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "CASE"
# $ANTLR start "CATCH"
def mCATCH(self, ):
try:
_type = CATCH
_channel = DEFAULT_CHANNEL
# Java.g:19:7: ( 'catch' )
# Java.g:19:9: 'catch'
pass
self.match("catch")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "CATCH"
# $ANTLR start "CHAR"
def mCHAR(self, ):
try:
_type = CHAR
_channel = DEFAULT_CHANNEL
# Java.g:20:6: ( 'char' )
# Java.g:20:8: 'char'
pass
self.match("char")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "CHAR"
# $ANTLR start "CLASS"
def mCLASS(self, ):
try:
_type = CLASS
_channel = DEFAULT_CHANNEL
# Java.g:21:7: ( 'class' )
# Java.g:21:9: 'class'
pass
self.match("class")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "CLASS"
# $ANTLR start "COLON"
def mCOLON(self, ):
try:
_type = COLON
_channel = DEFAULT_CHANNEL
# Java.g:22:7: ( ':' )
# Java.g:22:9: ':'
pass
self.match(58)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "COLON"
# $ANTLR start "COMMA"
def mCOMMA(self, ):
try:
_type = COMMA
_channel = DEFAULT_CHANNEL
# Java.g:23:7: ( ',' )
# Java.g:23:9: ','
pass
self.match(44)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "COMMA"
# $ANTLR start "CONTINUE"
def mCONTINUE(self, ):
try:
_type = CONTINUE
_channel = DEFAULT_CHANNEL
# Java.g:24:10: ( 'continue' )
# Java.g:24:12: 'continue'
pass
self.match("continue")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "CONTINUE"
# $ANTLR start "DEC"
def mDEC(self, ):
try:
_type = DEC
_channel = DEFAULT_CHANNEL
# Java.g:25:5: ( '--' )
# Java.g:25:7: '--'
pass
self.match("--")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "DEC"
# $ANTLR start "DEFAULT"
def mDEFAULT(self, ):
try:
_type = DEFAULT
_channel = DEFAULT_CHANNEL
# Java.g:26:9: ( 'default' )
# Java.g:26:11: 'default'
pass
self.match("default")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "DEFAULT"
# $ANTLR start "DIV"
def mDIV(self, ):
try:
_type = DIV
_channel = DEFAULT_CHANNEL
# Java.g:27:5: ( '/' )
# Java.g:27:7: '/'
pass
self.match(47)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "DIV"
# $ANTLR start "DIV_ASSIGN"
def mDIV_ASSIGN(self, ):
try:
_type = DIV_ASSIGN
_channel = DEFAULT_CHANNEL
# Java.g:28:12: ( '/=' )
# Java.g:28:14: '/='
pass
self.match("/=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "DIV_ASSIGN"
# $ANTLR start "DO"
def mDO(self, ):
try:
_type = DO
_channel = DEFAULT_CHANNEL
# Java.g:29:4: ( 'do' )
# Java.g:29:6: 'do'
pass
self.match("do")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "DO"
# $ANTLR start "DOT"
def mDOT(self, ):
try:
_type = DOT
_channel = DEFAULT_CHANNEL
# Java.g:30:5: ( '.' )
# Java.g:30:7: '.'
pass
self.match(46)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "DOT"
# $ANTLR start "DOTSTAR"
def mDOTSTAR(self, ):
try:
_type = DOTSTAR
_channel = DEFAULT_CHANNEL
# Java.g:31:9: ( '.*' )
# Java.g:31:11: '.*'
pass
self.match(".*")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "DOTSTAR"
# $ANTLR start "DOUBLE"
def mDOUBLE(self, ):
try:
_type = DOUBLE
_channel = DEFAULT_CHANNEL
# Java.g:32:8: ( 'double' )
# Java.g:32:10: 'double'
pass
self.match("double")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "DOUBLE"
# $ANTLR start "ELLIPSIS"
def mELLIPSIS(self, ):
try:
_type = ELLIPSIS
_channel = DEFAULT_CHANNEL
# Java.g:33:10: ( '...' )
# Java.g:33:12: '...'
pass
self.match("...")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ELLIPSIS"
# $ANTLR start "ELSE"
def mELSE(self, ):
try:
_type = ELSE
_channel = DEFAULT_CHANNEL
# Java.g:34:6: ( 'else' )
# Java.g:34:8: 'else'
pass
self.match("else")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ELSE"
# $ANTLR start "ENUM"
def mENUM(self, ):
try:
_type = ENUM
_channel = DEFAULT_CHANNEL
# Java.g:35:6: ( 'enum' )
# Java.g:35:8: 'enum'
pass
self.match("enum")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ENUM"
# $ANTLR start "EQUAL"
def mEQUAL(self, ):
try:
_type = EQUAL
_channel = DEFAULT_CHANNEL
# Java.g:36:7: ( '==' )
# Java.g:36:9: '=='
pass
self.match("==")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "EQUAL"
# $ANTLR start "EXTENDS"
def mEXTENDS(self, ):
try:
_type = EXTENDS
_channel = DEFAULT_CHANNEL
# Java.g:37:9: ( 'extends' )
# Java.g:37:11: 'extends'
pass
self.match("extends")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "EXTENDS"
# $ANTLR start "FALSE"
def mFALSE(self, ):
try:
_type = FALSE
_channel = DEFAULT_CHANNEL
# Java.g:38:7: ( 'false' )
# Java.g:38:9: 'false'
pass
self.match("false")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "FALSE"
# $ANTLR start "FINAL"
def mFINAL(self, ):
try:
_type = FINAL
_channel = DEFAULT_CHANNEL
# Java.g:39:7: ( 'final' )
# Java.g:39:9: 'final'
pass
self.match("final")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "FINAL"
# $ANTLR start "FINALLY"
def mFINALLY(self, ):
try:
_type = FINALLY
_channel = DEFAULT_CHANNEL
# Java.g:40:9: ( 'finally' )
# Java.g:40:11: 'finally'
pass
self.match("finally")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "FINALLY"
# $ANTLR start "FLOAT"
def mFLOAT(self, ):
try:
_type = FLOAT
_channel = DEFAULT_CHANNEL
# Java.g:41:7: ( 'float' )
# Java.g:41:9: 'float'
pass
self.match("float")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "FLOAT"
# $ANTLR start "FOR"
def mFOR(self, ):
try:
_type = FOR
_channel = DEFAULT_CHANNEL
# Java.g:42:5: ( 'for' )
# Java.g:42:7: 'for'
pass
self.match("for")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "FOR"
# $ANTLR start "GREATER_OR_EQUAL"
def mGREATER_OR_EQUAL(self, ):
try:
_type = GREATER_OR_EQUAL
_channel = DEFAULT_CHANNEL
# Java.g:43:18: ( '>=' )
# Java.g:43:20: '>='
pass
self.match(">=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "GREATER_OR_EQUAL"
# $ANTLR start "GREATER_THAN"
def mGREATER_THAN(self, ):
try:
_type = GREATER_THAN
_channel = DEFAULT_CHANNEL
# Java.g:44:14: ( '>' )
# Java.g:44:16: '>'
pass
self.match(62)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "GREATER_THAN"
# $ANTLR start "IF"
def mIF(self, ):
try:
_type = IF
_channel = DEFAULT_CHANNEL
# Java.g:45:4: ( 'if' )
# Java.g:45:6: 'if'
pass
self.match("if")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "IF"
# $ANTLR start "IMPLEMENTS"
def mIMPLEMENTS(self, ):
try:
_type = IMPLEMENTS
_channel = DEFAULT_CHANNEL
# Java.g:46:12: ( 'implements' )
# Java.g:46:14: 'implements'
pass
self.match("implements")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "IMPLEMENTS"
# $ANTLR start "IMPORT"
def mIMPORT(self, ):
try:
_type = IMPORT
_channel = DEFAULT_CHANNEL
# Java.g:47:8: ( 'import' )
# Java.g:47:10: 'import'
pass
self.match("import")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "IMPORT"
# $ANTLR start "INC"
def mINC(self, ):
try:
_type = INC
_channel = DEFAULT_CHANNEL
# Java.g:48:5: ( '++' )
# Java.g:48:7: '++'
pass
self.match("++")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "INC"
# $ANTLR start "INSTANCEOF"
def mINSTANCEOF(self, ):
try:
_type = INSTANCEOF
_channel = DEFAULT_CHANNEL
# Java.g:49:12: ( 'instanceof' )
# Java.g:49:14: 'instanceof'
pass
self.match("instanceof")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "INSTANCEOF"
# $ANTLR start "INT"
def mINT(self, ):
try:
_type = INT
_channel = DEFAULT_CHANNEL
# Java.g:50:5: ( 'int' )
# Java.g:50:7: 'int'
pass
self.match("int")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "INT"
# $ANTLR start "INTERFACE"
def mINTERFACE(self, ):
try:
_type = INTERFACE
_channel = DEFAULT_CHANNEL
# Java.g:51:11: ( 'interface' )
# Java.g:51:13: 'interface'
pass
self.match("interface")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "INTERFACE"
# $ANTLR start "LBRACK"
def mLBRACK(self, ):
try:
_type = LBRACK
_channel = DEFAULT_CHANNEL
# Java.g:52:8: ( '[' )
# Java.g:52:10: '['
pass
self.match(91)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "LBRACK"
# $ANTLR start "LCURLY"
def mLCURLY(self, ):
try:
_type = LCURLY
_channel = DEFAULT_CHANNEL
# Java.g:53:8: ( '{' )
# Java.g:53:10: '{'
pass
self.match(123)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "LCURLY"
# $ANTLR start "LESS_OR_EQUAL"
def mLESS_OR_EQUAL(self, ):
try:
_type = LESS_OR_EQUAL
_channel = DEFAULT_CHANNEL
# Java.g:54:15: ( '<=' )
# Java.g:54:17: '<='
pass
self.match("<=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "LESS_OR_EQUAL"
# $ANTLR start "LESS_THAN"
def mLESS_THAN(self, ):
try:
_type = LESS_THAN
_channel = DEFAULT_CHANNEL
# Java.g:55:11: ( '<' )
# Java.g:55:13: '<'
pass
self.match(60)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "LESS_THAN"
# $ANTLR start "LOGICAL_AND"
def mLOGICAL_AND(self, ):
try:
_type = LOGICAL_AND
_channel = DEFAULT_CHANNEL
# Java.g:56:13: ( '&&' )
# Java.g:56:15: '&&'
pass
self.match("&&")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "LOGICAL_AND"
# $ANTLR start "LOGICAL_NOT"
def mLOGICAL_NOT(self, ):
try:
_type = LOGICAL_NOT
_channel = DEFAULT_CHANNEL
# Java.g:57:13: ( '!' )
# Java.g:57:15: '!'
pass
self.match(33)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "LOGICAL_NOT"
# $ANTLR start "LOGICAL_OR"
def mLOGICAL_OR(self, ):
try:
_type = LOGICAL_OR
_channel = DEFAULT_CHANNEL
# Java.g:58:12: ( '||' )
# Java.g:58:14: '||'
pass
self.match("||")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "LOGICAL_OR"
# $ANTLR start "LONG"
def mLONG(self, ):
try:
_type = LONG
_channel = DEFAULT_CHANNEL
# Java.g:59:6: ( 'long' )
# Java.g:59:8: 'long'
pass
self.match("long")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "LONG"
# $ANTLR start "LPAREN"
def mLPAREN(self, ):
try:
_type = LPAREN
_channel = DEFAULT_CHANNEL
# Java.g:60:8: ( '(' )
# Java.g:60:10: '('
pass
self.match(40)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "LPAREN"
# $ANTLR start "MINUS"
def mMINUS(self, ):
try:
_type = MINUS
_channel = DEFAULT_CHANNEL
# Java.g:61:7: ( '-' )
# Java.g:61:9: '-'
pass
self.match(45)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "MINUS"
# $ANTLR start "MINUS_ASSIGN"
def mMINUS_ASSIGN(self, ):
try:
_type = MINUS_ASSIGN
_channel = DEFAULT_CHANNEL
# Java.g:62:14: ( '-=' )
# Java.g:62:16: '-='
pass
self.match("-=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "MINUS_ASSIGN"
# $ANTLR start "MOD"
def mMOD(self, ):
try:
_type = MOD
_channel = DEFAULT_CHANNEL
# Java.g:63:5: ( '%' )
# Java.g:63:7: '%'
pass
self.match(37)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "MOD"
# $ANTLR start "MOD_ASSIGN"
def mMOD_ASSIGN(self, ):
try:
_type = MOD_ASSIGN
_channel = DEFAULT_CHANNEL
# Java.g:64:12: ( '%=' )
# Java.g:64:14: '%='
pass
self.match("%=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "MOD_ASSIGN"
# $ANTLR start "NATIVE"
def mNATIVE(self, ):
try:
_type = NATIVE
_channel = DEFAULT_CHANNEL
# Java.g:65:8: ( 'native' )
# Java.g:65:10: 'native'
pass
self.match("native")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "NATIVE"
# $ANTLR start "NEW"
def mNEW(self, ):
try:
_type = NEW
_channel = DEFAULT_CHANNEL
# Java.g:66:5: ( 'new' )
# Java.g:66:7: 'new'
pass
self.match("new")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "NEW"
# $ANTLR start "NOT"
def mNOT(self, ):
try:
_type = NOT
_channel = DEFAULT_CHANNEL
# Java.g:67:5: ( '~' )
# Java.g:67:7: '~'
pass
self.match(126)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "NOT"
# $ANTLR start "NOT_EQUAL"
def mNOT_EQUAL(self, ):
try:
_type = NOT_EQUAL
_channel = DEFAULT_CHANNEL
# Java.g:68:11: ( '!=' )
# Java.g:68:13: '!='
pass
self.match("!=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "NOT_EQUAL"
# $ANTLR start "NULL"
def mNULL(self, ):
try:
_type = NULL
_channel = DEFAULT_CHANNEL
# Java.g:69:6: ( 'null' )
# Java.g:69:8: 'null'
pass
self.match("null")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "NULL"
# $ANTLR start "OR"
def mOR(self, ):
try:
_type = OR
_channel = DEFAULT_CHANNEL
# Java.g:70:4: ( '|' )
# Java.g:70:6: '|'
pass
self.match(124)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "OR"
# $ANTLR start "OR_ASSIGN"
def mOR_ASSIGN(self, ):
try:
_type = OR_ASSIGN
_channel = DEFAULT_CHANNEL
# Java.g:71:11: ( '|=' )
# Java.g:71:13: '|='
pass
self.match("|=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "OR_ASSIGN"
# $ANTLR start "PACKAGE"
def mPACKAGE(self, ):
try:
_type = PACKAGE
_channel = DEFAULT_CHANNEL
# Java.g:72:9: ( 'package' )
# Java.g:72:11: 'package'
pass
self.match("package")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "PACKAGE"
# $ANTLR start "PLUS"
def mPLUS(self, ):
try:
_type = PLUS
_channel = DEFAULT_CHANNEL
# Java.g:73:6: ( '+' )
# Java.g:73:8: '+'
pass
self.match(43)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "PLUS"
# $ANTLR start "PLUS_ASSIGN"
def mPLUS_ASSIGN(self, ):
try:
_type = PLUS_ASSIGN
_channel = DEFAULT_CHANNEL
# Java.g:74:13: ( '+=' )
# Java.g:74:15: '+='
pass
self.match("+=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "PLUS_ASSIGN"
# $ANTLR start "PRIVATE"
def mPRIVATE(self, ):
try:
_type = PRIVATE
_channel = DEFAULT_CHANNEL
# Java.g:75:9: ( 'private' )
# Java.g:75:11: 'private'
pass
self.match("private")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "PRIVATE"
# $ANTLR start "PROTECTED"
def mPROTECTED(self, ):
try:
_type = PROTECTED
_channel = DEFAULT_CHANNEL
# Java.g:76:11: ( 'protected' )
# Java.g:76:13: 'protected'
pass
self.match("protected")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "PROTECTED"
# $ANTLR start "PUBLIC"
def mPUBLIC(self, ):
try:
_type = PUBLIC
_channel = DEFAULT_CHANNEL
# Java.g:77:8: ( 'public' )
# Java.g:77:10: 'public'
pass
self.match("public")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "PUBLIC"
# $ANTLR start "QUESTION"
def mQUESTION(self, ):
try:
_type = QUESTION
_channel = DEFAULT_CHANNEL
# Java.g:78:10: ( '?' )
# Java.g:78:12: '?'
pass
self.match(63)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "QUESTION"
# $ANTLR start "RBRACK"
def mRBRACK(self, ):
try:
_type = RBRACK
_channel = DEFAULT_CHANNEL
# Java.g:79:8: ( ']' )
# Java.g:79:10: ']'
pass
self.match(93)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "RBRACK"
# $ANTLR start "RCURLY"
def mRCURLY(self, ):
try:
_type = RCURLY
_channel = DEFAULT_CHANNEL
# Java.g:80:8: ( '}' )
# Java.g:80:10: '}'
pass
self.match(125)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "RCURLY"
# $ANTLR start "RETURN"
def mRETURN(self, ):
try:
_type = RETURN
_channel = DEFAULT_CHANNEL
# Java.g:81:8: ( 'return' )
# Java.g:81:10: 'return'
pass
self.match("return")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "RETURN"
# $ANTLR start "RPAREN"
def mRPAREN(self, ):
try:
_type = RPAREN
_channel = DEFAULT_CHANNEL
# Java.g:82:8: ( ')' )
# Java.g:82:10: ')'
pass
self.match(41)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "RPAREN"
# $ANTLR start "SEMI"
def mSEMI(self, ):
try:
_type = SEMI
_channel = DEFAULT_CHANNEL
# Java.g:83:6: ( ';' )
# Java.g:83:8: ';'
pass
self.match(59)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "SEMI"
# $ANTLR start "SHIFT_LEFT"
def mSHIFT_LEFT(self, ):
try:
_type = SHIFT_LEFT
_channel = DEFAULT_CHANNEL
# Java.g:84:12: ( '<<' )
# Java.g:84:14: '<<'
pass
self.match("<<")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "SHIFT_LEFT"
# $ANTLR start "SHIFT_LEFT_ASSIGN"
def mSHIFT_LEFT_ASSIGN(self, ):
try:
_type = SHIFT_LEFT_ASSIGN
_channel = DEFAULT_CHANNEL
# Java.g:85:19: ( '<<=' )
# Java.g:85:21: '<<='
pass
self.match("<<=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "SHIFT_LEFT_ASSIGN"
# $ANTLR start "SHIFT_RIGHT"
def mSHIFT_RIGHT(self, ):
try:
_type = SHIFT_RIGHT
_channel = DEFAULT_CHANNEL
# Java.g:86:13: ( '>>' )
# Java.g:86:15: '>>'
pass
self.match(">>")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "SHIFT_RIGHT"
# $ANTLR start "SHIFT_RIGHT_ASSIGN"
def mSHIFT_RIGHT_ASSIGN(self, ):
try:
_type = SHIFT_RIGHT_ASSIGN
_channel = DEFAULT_CHANNEL
# Java.g:87:20: ( '>>=' )
# Java.g:87:22: '>>='
pass
self.match(">>=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "SHIFT_RIGHT_ASSIGN"
# $ANTLR start "SHORT"
def mSHORT(self, ):
try:
_type = SHORT
_channel = DEFAULT_CHANNEL
# Java.g:88:7: ( 'short' )
# Java.g:88:9: 'short'
pass
self.match("short")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "SHORT"
# $ANTLR start "STAR"
def mSTAR(self, ):
try:
_type = STAR
_channel = DEFAULT_CHANNEL
# Java.g:89:6: ( '*' )
# Java.g:89:8: '*'
pass
self.match(42)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "STAR"
# $ANTLR start "STAR_ASSIGN"
def mSTAR_ASSIGN(self, ):
try:
_type = STAR_ASSIGN
_channel = DEFAULT_CHANNEL
# Java.g:90:13: ( '*=' )
# Java.g:90:15: '*='
pass
self.match("*=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "STAR_ASSIGN"
# $ANTLR start "STATIC"
def mSTATIC(self, ):
try:
_type = STATIC
_channel = DEFAULT_CHANNEL
# Java.g:91:8: ( 'static' )
# Java.g:91:10: 'static'
pass
self.match("static")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "STATIC"
# $ANTLR start "STRICTFP"
def mSTRICTFP(self, ):
try:
_type = STRICTFP
_channel = DEFAULT_CHANNEL
# Java.g:92:10: ( 'strictfp' )
# Java.g:92:12: 'strictfp'
pass
self.match("strictfp")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "STRICTFP"
# $ANTLR start "SUPER"
def mSUPER(self, ):
try:
_type = SUPER
_channel = DEFAULT_CHANNEL
# Java.g:93:7: ( 'super' )
# Java.g:93:9: 'super'
pass
self.match("super")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "SUPER"
# $ANTLR start "SWITCH"
def mSWITCH(self, ):
try:
_type = SWITCH
_channel = DEFAULT_CHANNEL
# Java.g:94:8: ( 'switch' )
# Java.g:94:10: 'switch'
pass
self.match("switch")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "SWITCH"
# $ANTLR start "SYNCHRONIZED"
def mSYNCHRONIZED(self, ):
try:
_type = SYNCHRONIZED
_channel = DEFAULT_CHANNEL
# Java.g:95:14: ( 'synchronized' )
# Java.g:95:16: 'synchronized'
pass
self.match("synchronized")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "SYNCHRONIZED"
# $ANTLR start "THIS"
def mTHIS(self, ):
try:
_type = THIS
_channel = DEFAULT_CHANNEL
# Java.g:96:6: ( 'this' )
# Java.g:96:8: 'this'
pass
self.match("this")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "THIS"
# $ANTLR start "THROW"
def mTHROW(self, ):
try:
_type = THROW
_channel = DEFAULT_CHANNEL
# Java.g:97:7: ( 'throw' )
# Java.g:97:9: 'throw'
pass
self.match("throw")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "THROW"
# $ANTLR start "THROWS"
def mTHROWS(self, ):
try:
_type = THROWS
_channel = DEFAULT_CHANNEL
# Java.g:98:8: ( 'throws' )
# Java.g:98:10: 'throws'
pass
self.match("throws")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "THROWS"
# $ANTLR start "TRANSIENT"
def mTRANSIENT(self, ):
try:
_type = TRANSIENT
_channel = DEFAULT_CHANNEL
# Java.g:99:11: ( 'transient' )
# Java.g:99:13: 'transient'
pass
self.match("transient")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "TRANSIENT"
# $ANTLR start "TRUE"
def mTRUE(self, ):
try:
_type = TRUE
_channel = DEFAULT_CHANNEL
# Java.g:100:6: ( 'true' )
# Java.g:100:8: 'true'
pass
self.match("true")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "TRUE"
# $ANTLR start "TRY"
def mTRY(self, ):
try:
_type = TRY
_channel = DEFAULT_CHANNEL
# Java.g:101:5: ( 'try' )
# Java.g:101:7: 'try'
pass
self.match("try")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "TRY"
# $ANTLR start "VOID"
def mVOID(self, ):
try:
_type = VOID
_channel = DEFAULT_CHANNEL
# Java.g:102:6: ( 'void' )
# Java.g:102:8: 'void'
pass
self.match("void")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "VOID"
# $ANTLR start "VOLATILE"
def mVOLATILE(self, ):
try:
_type = VOLATILE
_channel = DEFAULT_CHANNEL
# Java.g:103:10: ( 'volatile' )
# Java.g:103:12: 'volatile'
pass
self.match("volatile")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "VOLATILE"
# $ANTLR start "WHILE"
def mWHILE(self, ):
try:
_type = WHILE
_channel = DEFAULT_CHANNEL
# Java.g:104:7: ( 'while' )
# Java.g:104:9: 'while'
pass
self.match("while")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "WHILE"
# $ANTLR start "XOR"
def mXOR(self, ):
try:
_type = XOR
_channel = DEFAULT_CHANNEL
# Java.g:105:5: ( '^' )
# Java.g:105:7: '^'
pass
self.match(94)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "XOR"
# $ANTLR start "XOR_ASSIGN"
def mXOR_ASSIGN(self, ):
try:
_type = XOR_ASSIGN
_channel = DEFAULT_CHANNEL
# Java.g:106:12: ( '^=' )
# Java.g:106:14: '^='
pass
self.match("^=")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "XOR_ASSIGN"
# $ANTLR start "HEX_LITERAL"
    def mHEX_LITERAL(self, ):
        """Lex a Java hexadecimal integer literal.

        Grammar: '0' ('x'|'X') HEX_DIGIT+ INTEGER_TYPE_SUFFIX?
        Generated by ANTLR from Java.g; on a mismatch it attempts
        single-token recovery and then re-raises.
        """
        try:
            _type = HEX_LITERAL
            _channel = DEFAULT_CHANNEL
            # Java.g:1001:13: ( '0' ( 'x' | 'X' ) ( HEX_DIGIT )+ ( INTEGER_TYPE_SUFFIX )? )
            # Java.g:1001:15: '0' ( 'x' | 'X' ) ( HEX_DIGIT )+ ( INTEGER_TYPE_SUFFIX )?
            pass
            # 48 == '0'; then require 'X' (88) or 'x' (120).
            self.match(48)
            if self.input.LA(1) == 88 or self.input.LA(1) == 120:
                self.input.consume()
            else:
                mse = MismatchedSetException(None, self.input)
                self.recover(mse)
                raise mse
            # Java.g:1001:29: ( HEX_DIGIT )+
            # One-or-more hex digits: '0'-'9' (48-57), 'A'-'F' (65-70), 'a'-'f' (97-102).
            cnt1 = 0
            while True: #loop1
                alt1 = 2
                LA1_0 = self.input.LA(1)
                if ((48 <= LA1_0 <= 57) or (65 <= LA1_0 <= 70) or (97 <= LA1_0 <= 102)) :
                    alt1 = 1
                if alt1 == 1:
                    # Java.g:
                    pass
                    if (48 <= self.input.LA(1) <= 57) or (65 <= self.input.LA(1) <= 70) or (97 <= self.input.LA(1) <= 102):
                        self.input.consume()
                    else:
                        mse = MismatchedSetException(None, self.input)
                        self.recover(mse)
                        raise mse
                else:
                    # No more hex digits: fine after at least one, error otherwise.
                    if cnt1 >= 1:
                        break #loop1
                    eee = EarlyExitException(1, self.input)
                    raise eee
                cnt1 += 1
            # Java.g:1001:40: ( INTEGER_TYPE_SUFFIX )?
            # Optional long suffix: 'L' (76) or 'l' (108).
            alt2 = 2
            LA2_0 = self.input.LA(1)
            if (LA2_0 == 76 or LA2_0 == 108) :
                alt2 = 1
            if alt2 == 1:
                # Java.g:
                pass
                if self.input.LA(1) == 76 or self.input.LA(1) == 108:
                    self.input.consume()
                else:
                    mse = MismatchedSetException(None, self.input)
                    self.recover(mse)
                    raise mse
            self._state.type = _type
            self._state.channel = _channel
        finally:
            pass
# $ANTLR end "HEX_LITERAL"
# $ANTLR start "DECIMAL_LITERAL"
def mDECIMAL_LITERAL(self, ):
try:
_type = DECIMAL_LITERAL
_channel = DEFAULT_CHANNEL
# Java.g:1003:17: ( ( '0' | '1' .. '9' ( '0' .. '9' )* ) ( INTEGER_TYPE_SUFFIX )? )
# Java.g:1003:19: ( '0' | '1' .. '9' ( '0' .. '9' )* ) ( INTEGER_TYPE_SUFFIX )?
pass
# Java.g:1003:19: ( '0' | '1' .. '9' ( '0' .. '9' )* )
alt4 = 2
LA4_0 = self.input.LA(1)
if (LA4_0 == 48) :
alt4 = 1
elif ((49 <= LA4_0 <= 57)) :
alt4 = 2
else:
nvae = NoViableAltException("", 4, 0, self.input)
raise nvae
if alt4 == 1:
# Java.g:1003:20: '0'
pass
self.match(48)
elif alt4 == 2:
# Java.g:1003:26: '1' .. '9' ( '0' .. '9' )*
pass
self.matchRange(49, 57)
# Java.g:1003:35: ( '0' .. '9' )*
while True: #loop3
alt3 = 2
LA3_0 = self.input.LA(1)
if ((48 <= LA3_0 <= 57)) :
alt3 = 1
if alt3 == 1:
# Java.g:
pass
if (48 <= self.input.LA(1) <= 57):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
break #loop3
# Java.g:1003:46: ( INTEGER_TYPE_SUFFIX )?
alt5 = 2
LA5_0 = self.input.LA(1)
if (LA5_0 == 76 or LA5_0 == 108) :
alt5 = 1
if alt5 == 1:
# Java.g:
pass
if self.input.LA(1) == 76 or self.input.LA(1) == 108:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "DECIMAL_LITERAL"
# $ANTLR start "OCTAL_LITERAL"
def mOCTAL_LITERAL(self, ):
try:
_type = OCTAL_LITERAL
_channel = DEFAULT_CHANNEL
# Java.g:1005:15: ( '0' ( '0' .. '7' )+ ( INTEGER_TYPE_SUFFIX )? )
# Java.g:1005:17: '0' ( '0' .. '7' )+ ( INTEGER_TYPE_SUFFIX )?
pass
self.match(48)
# Java.g:1005:21: ( '0' .. '7' )+
cnt6 = 0
while True: #loop6
alt6 = 2
LA6_0 = self.input.LA(1)
if ((48 <= LA6_0 <= 55)) :
alt6 = 1
if alt6 == 1:
# Java.g:
pass
if (48 <= self.input.LA(1) <= 55):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
if cnt6 >= 1:
break #loop6
eee = EarlyExitException(6, self.input)
raise eee
cnt6 += 1
# Java.g:1005:33: ( INTEGER_TYPE_SUFFIX )?
alt7 = 2
LA7_0 = self.input.LA(1)
if (LA7_0 == 76 or LA7_0 == 108) :
alt7 = 1
if alt7 == 1:
# Java.g:
pass
if self.input.LA(1) == 76 or self.input.LA(1) == 108:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "OCTAL_LITERAL"
# $ANTLR start "HEX_DIGIT"
    def mHEX_DIGIT(self, ):
        """Fragment rule: consume one hex digit ('0'-'9', 'A'-'F', 'a'-'f').

        Fragments emit no token of their own; this only advances the
        input (used by HEX_LITERAL).
        """
        try:
            # Java.g:1009:11: ( ( '0' .. '9' | 'a' .. 'f' | 'A' .. 'F' ) )
            # Java.g:
            pass
            # Code points: 48-57 = '0'-'9', 65-70 = 'A'-'F', 97-102 = 'a'-'f'.
            if (48 <= self.input.LA(1) <= 57) or (65 <= self.input.LA(1) <= 70) or (97 <= self.input.LA(1) <= 102):
                self.input.consume()
            else:
                mse = MismatchedSetException(None, self.input)
                self.recover(mse)
                raise mse
        finally:
            pass
# $ANTLR end "HEX_DIGIT"
# $ANTLR start "INTEGER_TYPE_SUFFIX"
    def mINTEGER_TYPE_SUFFIX(self, ):
        """Fragment rule: consume the integer 'long' suffix ('l' or 'L').

        Fragments emit no token of their own; this only advances the input.
        """
        try:
            # Java.g:1012:21: ( ( 'l' | 'L' ) )
            # Java.g:
            pass
            # Code points: 76 = 'L', 108 = 'l'.
            if self.input.LA(1) == 76 or self.input.LA(1) == 108:
                self.input.consume()
            else:
                mse = MismatchedSetException(None, self.input)
                self.recover(mse)
                raise mse
        finally:
            pass
# $ANTLR end "INTEGER_TYPE_SUFFIX"
# $ANTLR start "FLOATING_POINT_LITERAL"
def mFLOATING_POINT_LITERAL(self, ):
try:
_type = FLOATING_POINT_LITERAL
_channel = DEFAULT_CHANNEL
# Java.g:1014:5: ( ( '0' .. '9' )+ ( DOT ( '0' .. '9' )* ( EXPONENT )? ( FLOAT_TYPE_SUFFIX )? | EXPONENT ( FLOAT_TYPE_SUFFIX )? | FLOAT_TYPE_SUFFIX ) | DOT ( '0' .. '9' )+ ( EXPONENT )? ( FLOAT_TYPE_SUFFIX )? )
alt17 = 2
LA17_0 = self.input.LA(1)
if ((48 <= LA17_0 <= 57)) :
alt17 = 1
elif (LA17_0 == 46) :
alt17 = 2
else:
nvae = NoViableAltException("", 17, 0, self.input)
raise nvae
if alt17 == 1:
# Java.g:1014:9: ( '0' .. '9' )+ ( DOT ( '0' .. '9' )* ( EXPONENT )? ( FLOAT_TYPE_SUFFIX )? | EXPONENT ( FLOAT_TYPE_SUFFIX )? | FLOAT_TYPE_SUFFIX )
pass
# Java.g:1014:9: ( '0' .. '9' )+
cnt8 = 0
while True: #loop8
alt8 = 2
LA8_0 = self.input.LA(1)
if ((48 <= LA8_0 <= 57)) :
alt8 = 1
if alt8 == 1:
# Java.g:
pass
if (48 <= self.input.LA(1) <= 57):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
if cnt8 >= 1:
break #loop8
eee = EarlyExitException(8, self.input)
raise eee
cnt8 += 1
# Java.g:1015:9: ( DOT ( '0' .. '9' )* ( EXPONENT )? ( FLOAT_TYPE_SUFFIX )? | EXPONENT ( FLOAT_TYPE_SUFFIX )? | FLOAT_TYPE_SUFFIX )
alt13 = 3
LA13 = self.input.LA(1)
if LA13 == 46:
alt13 = 1
elif LA13 == 69 or LA13 == 101:
alt13 = 2
elif LA13 == 68 or LA13 == 70 or LA13 == 100 or LA13 == 102:
alt13 = 3
else:
nvae = NoViableAltException("", 13, 0, self.input)
raise nvae
if alt13 == 1:
# Java.g:1016:13: DOT ( '0' .. '9' )* ( EXPONENT )? ( FLOAT_TYPE_SUFFIX )?
pass
self.mDOT()
# Java.g:1016:17: ( '0' .. '9' )*
while True: #loop9
alt9 = 2
LA9_0 = self.input.LA(1)
if ((48 <= LA9_0 <= 57)) :
alt9 = 1
if alt9 == 1:
# Java.g:
pass
if (48 <= self.input.LA(1) <= 57):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
break #loop9
# Java.g:1016:29: ( EXPONENT )?
alt10 = 2
LA10_0 = self.input.LA(1)
if (LA10_0 == 69 or LA10_0 == 101) :
alt10 = 1
if alt10 == 1:
# Java.g:1016:29: EXPONENT
pass
self.mEXPONENT()
# Java.g:1016:39: ( FLOAT_TYPE_SUFFIX )?
alt11 = 2
LA11_0 = self.input.LA(1)
if (LA11_0 == 68 or LA11_0 == 70 or LA11_0 == 100 or LA11_0 == 102) :
alt11 = 1
if alt11 == 1:
# Java.g:
pass
if self.input.LA(1) == 68 or self.input.LA(1) == 70 or self.input.LA(1) == 100 or self.input.LA(1) == 102:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
elif alt13 == 2:
# Java.g:1017:13: EXPONENT ( FLOAT_TYPE_SUFFIX )?
pass
self.mEXPONENT()
# Java.g:1017:22: ( FLOAT_TYPE_SUFFIX )?
alt12 = 2
LA12_0 = self.input.LA(1)
if (LA12_0 == 68 or LA12_0 == 70 or LA12_0 == 100 or LA12_0 == 102) :
alt12 = 1
if alt12 == 1:
# Java.g:
pass
if self.input.LA(1) == 68 or self.input.LA(1) == 70 or self.input.LA(1) == 100 or self.input.LA(1) == 102:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
elif alt13 == 3:
# Java.g:1018:13: FLOAT_TYPE_SUFFIX
pass
self.mFLOAT_TYPE_SUFFIX()
elif alt17 == 2:
# Java.g:1020:9: DOT ( '0' .. '9' )+ ( EXPONENT )? ( FLOAT_TYPE_SUFFIX )?
pass
self.mDOT()
# Java.g:1020:13: ( '0' .. '9' )+
cnt14 = 0
while True: #loop14
alt14 = 2
LA14_0 = self.input.LA(1)
if ((48 <= LA14_0 <= 57)) :
alt14 = 1
if alt14 == 1:
# Java.g:
pass
if (48 <= self.input.LA(1) <= 57):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
if cnt14 >= 1:
break #loop14
eee = EarlyExitException(14, self.input)
raise eee
cnt14 += 1
# Java.g:1020:25: ( EXPONENT )?
alt15 = 2
LA15_0 = self.input.LA(1)
if (LA15_0 == 69 or LA15_0 == 101) :
alt15 = 1
if alt15 == 1:
# Java.g:1020:25: EXPONENT
pass
self.mEXPONENT()
# Java.g:1020:35: ( FLOAT_TYPE_SUFFIX )?
alt16 = 2
LA16_0 = self.input.LA(1)
if (LA16_0 == 68 or LA16_0 == 70 or LA16_0 == 100 or LA16_0 == 102) :
alt16 = 1
if alt16 == 1:
# Java.g:
pass
if self.input.LA(1) == 68 or self.input.LA(1) == 70 or self.input.LA(1) == 100 or self.input.LA(1) == 102:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "FLOATING_POINT_LITERAL"
# $ANTLR start "EXPONENT"
def mEXPONENT(self, ):
try:
# Java.g:1025:10: ( ( 'e' | 'E' ) ( '+' | '-' )? ( '0' .. '9' )+ )
# Java.g:1025:12: ( 'e' | 'E' ) ( '+' | '-' )? ( '0' .. '9' )+
pass
if self.input.LA(1) == 69 or self.input.LA(1) == 101:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
# Java.g:1025:22: ( '+' | '-' )?
alt18 = 2
LA18_0 = self.input.LA(1)
if (LA18_0 == 43 or LA18_0 == 45) :
alt18 = 1
if alt18 == 1:
# Java.g:
pass
if self.input.LA(1) == 43 or self.input.LA(1) == 45:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
# Java.g:1025:33: ( '0' .. '9' )+
cnt19 = 0
while True: #loop19
alt19 = 2
LA19_0 = self.input.LA(1)
if ((48 <= LA19_0 <= 57)) :
alt19 = 1
if alt19 == 1:
# Java.g:
pass
if (48 <= self.input.LA(1) <= 57):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
if cnt19 >= 1:
break #loop19
eee = EarlyExitException(19, self.input)
raise eee
cnt19 += 1
finally:
pass
# $ANTLR end "EXPONENT"
# $ANTLR start "FLOAT_TYPE_SUFFIX"
def mFLOAT_TYPE_SUFFIX(self, ):
try:
# Java.g:1028:19: ( ( 'f' | 'F' | 'd' | 'D' ) )
# Java.g:
pass
if self.input.LA(1) == 68 or self.input.LA(1) == 70 or self.input.LA(1) == 100 or self.input.LA(1) == 102:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
finally:
pass
# $ANTLR end "FLOAT_TYPE_SUFFIX"
# $ANTLR start "CHARACTER_LITERAL"
def mCHARACTER_LITERAL(self, ):
try:
_type = CHARACTER_LITERAL
_channel = DEFAULT_CHANNEL
# Java.g:1030:5: ( '\\'' ( ESCAPE_SEQUENCE |~ ( '\\'' | '\\\\' ) ) '\\'' )
# Java.g:1030:9: '\\'' ( ESCAPE_SEQUENCE |~ ( '\\'' | '\\\\' ) ) '\\''
pass
self.match(39)
# Java.g:1030:14: ( ESCAPE_SEQUENCE |~ ( '\\'' | '\\\\' ) )
alt20 = 2
LA20_0 = self.input.LA(1)
if (LA20_0 == 92) :
alt20 = 1
elif ((0 <= LA20_0 <= 38) or (40 <= LA20_0 <= 91) or (93 <= LA20_0 <= 65535)) :
alt20 = 2
else:
nvae = NoViableAltException("", 20, 0, self.input)
raise nvae
if alt20 == 1:
# Java.g:1030:16: ESCAPE_SEQUENCE
pass
self.mESCAPE_SEQUENCE()
elif alt20 == 2:
# Java.g:1030:34: ~ ( '\\'' | '\\\\' )
pass
if (0 <= self.input.LA(1) <= 38) or (40 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
self.match(39)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "CHARACTER_LITERAL"
# $ANTLR start "STRING_LITERAL"
def mSTRING_LITERAL(self, ):
try:
_type = STRING_LITERAL
_channel = DEFAULT_CHANNEL
# Java.g:1034:5: ( '\"' ( ESCAPE_SEQUENCE |~ ( '\\\\' | '\"' ) )* '\"' )
# Java.g:1034:8: '\"' ( ESCAPE_SEQUENCE |~ ( '\\\\' | '\"' ) )* '\"'
pass
self.match(34)
# Java.g:1034:12: ( ESCAPE_SEQUENCE |~ ( '\\\\' | '\"' ) )*
while True: #loop21
alt21 = 3
LA21_0 = self.input.LA(1)
if (LA21_0 == 92) :
alt21 = 1
elif ((0 <= LA21_0 <= 33) or (35 <= LA21_0 <= 91) or (93 <= LA21_0 <= 65535)) :
alt21 = 2
if alt21 == 1:
# Java.g:1034:14: ESCAPE_SEQUENCE
pass
self.mESCAPE_SEQUENCE()
elif alt21 == 2:
# Java.g:1034:32: ~ ( '\\\\' | '\"' )
pass
if (0 <= self.input.LA(1) <= 33) or (35 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
break #loop21
self.match(34)
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "STRING_LITERAL"
# $ANTLR start "ESCAPE_SEQUENCE"
def mESCAPE_SEQUENCE(self, ):
try:
# Java.g:1040:5: ( '\\\\' ( 'b' | 't' | 'n' | 'f' | 'r' | '\\\"' | '\\'' | '\\\\' ) | UNICODE_ESCAPE | OCTAL_ESCAPE )
alt22 = 3
LA22_0 = self.input.LA(1)
if (LA22_0 == 92) :
LA22 = self.input.LA(2)
if LA22 == 34 or LA22 == 39 or LA22 == 92 or LA22 == 98 or LA22 == 102 or LA22 == 110 or LA22 == 114 or LA22 == 116:
alt22 = 1
elif LA22 == 117:
alt22 = 2
elif LA22 == 48 or LA22 == 49 or LA22 == 50 or LA22 == 51 or LA22 == 52 or LA22 == 53 or LA22 == 54 or LA22 == 55:
alt22 = 3
else:
nvae = NoViableAltException("", 22, 1, self.input)
raise nvae
else:
nvae = NoViableAltException("", 22, 0, self.input)
raise nvae
if alt22 == 1:
# Java.g:1040:9: '\\\\' ( 'b' | 't' | 'n' | 'f' | 'r' | '\\\"' | '\\'' | '\\\\' )
pass
self.match(92)
if self.input.LA(1) == 34 or self.input.LA(1) == 39 or self.input.LA(1) == 92 or self.input.LA(1) == 98 or self.input.LA(1) == 102 or self.input.LA(1) == 110 or self.input.LA(1) == 114 or self.input.LA(1) == 116:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
elif alt22 == 2:
# Java.g:1041:9: UNICODE_ESCAPE
pass
self.mUNICODE_ESCAPE()
elif alt22 == 3:
# Java.g:1042:9: OCTAL_ESCAPE
pass
self.mOCTAL_ESCAPE()
finally:
pass
# $ANTLR end "ESCAPE_SEQUENCE"
# $ANTLR start "OCTAL_ESCAPE"
def mOCTAL_ESCAPE(self, ):
try:
# Java.g:1047:5: ( '\\\\' ( '0' .. '3' ) ( '0' .. '7' ) ( '0' .. '7' ) | '\\\\' ( '0' .. '7' ) ( '0' .. '7' ) | '\\\\' ( '0' .. '7' ) )
alt23 = 3
LA23_0 = self.input.LA(1)
if (LA23_0 == 92) :
LA23_1 = self.input.LA(2)
if ((48 <= LA23_1 <= 51)) :
LA23_2 = self.input.LA(3)
if ((48 <= LA23_2 <= 55)) :
LA23_4 = self.input.LA(4)
if ((48 <= LA23_4 <= 55)) :
alt23 = 1
else:
alt23 = 2
else:
alt23 = 3
elif ((52 <= LA23_1 <= 55)) :
LA23_3 = self.input.LA(3)
if ((48 <= LA23_3 <= 55)) :
alt23 = 2
else:
alt23 = 3
else:
nvae = NoViableAltException("", 23, 1, self.input)
raise nvae
else:
nvae = NoViableAltException("", 23, 0, self.input)
raise nvae
if alt23 == 1:
# Java.g:1047:9: '\\\\' ( '0' .. '3' ) ( '0' .. '7' ) ( '0' .. '7' )
pass
self.match(92)
if (48 <= self.input.LA(1) <= 51):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
if (48 <= self.input.LA(1) <= 55):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
if (48 <= self.input.LA(1) <= 55):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
elif alt23 == 2:
# Java.g:1048:9: '\\\\' ( '0' .. '7' ) ( '0' .. '7' )
pass
self.match(92)
if (48 <= self.input.LA(1) <= 55):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
if (48 <= self.input.LA(1) <= 55):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
elif alt23 == 3:
# Java.g:1049:9: '\\\\' ( '0' .. '7' )
pass
self.match(92)
if (48 <= self.input.LA(1) <= 55):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
finally:
pass
# $ANTLR end "OCTAL_ESCAPE"
# $ANTLR start "UNICODE_ESCAPE"
def mUNICODE_ESCAPE(self, ):
try:
# Java.g:1054:5: ( '\\\\' 'u' HEX_DIGIT HEX_DIGIT HEX_DIGIT HEX_DIGIT )
# Java.g:1054:9: '\\\\' 'u' HEX_DIGIT HEX_DIGIT HEX_DIGIT HEX_DIGIT
pass
self.match(92)
self.match(117)
self.mHEX_DIGIT()
self.mHEX_DIGIT()
self.mHEX_DIGIT()
self.mHEX_DIGIT()
finally:
pass
# $ANTLR end "UNICODE_ESCAPE"
# $ANTLR start "IDENT"
def mIDENT(self, ):
try:
_type = IDENT
_channel = DEFAULT_CHANNEL
# Java.g:1057:5: ( JAVA_ID_START ( JAVA_ID_PART )* )
# Java.g:1057:9: JAVA_ID_START ( JAVA_ID_PART )*
pass
self.mJAVA_ID_START()
# Java.g:1057:23: ( JAVA_ID_PART )*
while True: #loop24
alt24 = 2
LA24_0 = self.input.LA(1)
if (LA24_0 == 36 or (48 <= LA24_0 <= 57) or (65 <= LA24_0 <= 90) or LA24_0 == 95 or (97 <= LA24_0 <= 122) or (192 <= LA24_0 <= 214) or (216 <= LA24_0 <= 246) or (248 <= LA24_0 <= 8191) or (12352 <= LA24_0 <= 12687) or (13056 <= LA24_0 <= 13183) or (13312 <= LA24_0 <= 15661) or (19968 <= LA24_0 <= 40959) or (63744 <= LA24_0 <= 64255)) :
alt24 = 1
if alt24 == 1:
# Java.g:
pass
if self.input.LA(1) == 36 or (48 <= self.input.LA(1) <= 57) or (65 <= self.input.LA(1) <= 90) or self.input.LA(1) == 95 or (97 <= self.input.LA(1) <= 122) or (192 <= self.input.LA(1) <= 214) or (216 <= self.input.LA(1) <= 246) or (248 <= self.input.LA(1) <= 8191) or (12352 <= self.input.LA(1) <= 12687) or (13056 <= self.input.LA(1) <= 13183) or (13312 <= self.input.LA(1) <= 15661) or (19968 <= self.input.LA(1) <= 40959) or (63744 <= self.input.LA(1) <= 64255):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
break #loop24
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "IDENT"
# $ANTLR start "JAVA_ID_START"
def mJAVA_ID_START(self, ):
try:
# Java.g:1063:5: ( '\\u0024' | '\\u0041' .. '\\u005a' | '\\u005f' | '\\u0061' .. '\\u007a' | '\\u00c0' .. '\\u00d6' | '\\u00d8' .. '\\u00f6' | '\\u00f8' .. '\\u00ff' | '\\u0100' .. '\\u1fff' | '\\u3040' .. '\\u318f' | '\\u3300' .. '\\u337f' | '\\u3400' .. '\\u3d2d' | '\\u4e00' .. '\\u9fff' | '\\uf900' .. '\\ufaff' )
# Java.g:
pass
if self.input.LA(1) == 36 or (65 <= self.input.LA(1) <= 90) or self.input.LA(1) == 95 or (97 <= self.input.LA(1) <= 122) or (192 <= self.input.LA(1) <= 214) or (216 <= self.input.LA(1) <= 246) or (248 <= self.input.LA(1) <= 8191) or (12352 <= self.input.LA(1) <= 12687) or (13056 <= self.input.LA(1) <= 13183) or (13312 <= self.input.LA(1) <= 15661) or (19968 <= self.input.LA(1) <= 40959) or (63744 <= self.input.LA(1) <= 64255):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
finally:
pass
# $ANTLR end "JAVA_ID_START"
# $ANTLR start "JAVA_ID_PART"
def mJAVA_ID_PART(self, ):
try:
# Java.g:1080:5: ( JAVA_ID_START | '\\u0030' .. '\\u0039' )
# Java.g:
pass
if self.input.LA(1) == 36 or (48 <= self.input.LA(1) <= 57) or (65 <= self.input.LA(1) <= 90) or self.input.LA(1) == 95 or (97 <= self.input.LA(1) <= 122) or (192 <= self.input.LA(1) <= 214) or (216 <= self.input.LA(1) <= 246) or (248 <= self.input.LA(1) <= 8191) or (12352 <= self.input.LA(1) <= 12687) or (13056 <= self.input.LA(1) <= 13183) or (13312 <= self.input.LA(1) <= 15661) or (19968 <= self.input.LA(1) <= 40959) or (63744 <= self.input.LA(1) <= 64255):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
finally:
pass
# $ANTLR end "JAVA_ID_PART"
# $ANTLR start "WS"
def mWS(self, ):
try:
_type = WS
_channel = DEFAULT_CHANNEL
# Java.g:1083:5: ( ( ' ' | '\\r' | '\\t' | '\\u000C' | '\\n' ) )
# Java.g:1083:8: ( ' ' | '\\r' | '\\t' | '\\u000C' | '\\n' )
pass
if (9 <= self.input.LA(1) <= 10) or (12 <= self.input.LA(1) <= 13) or self.input.LA(1) == 32:
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
#action start
_channel = HIDDEN;
#action end
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "WS"
# $ANTLR start "COMMENT"
def mCOMMENT(self, ):
try:
_type = COMMENT
_channel = DEFAULT_CHANNEL
# Java.g:1090:5: ( '/*' ( options {greedy=false; } : . )* '*/' )
# Java.g:1090:9: '/*' ( options {greedy=false; } : . )* '*/'
pass
self.match("/*")
# Java.g:1090:14: ( options {greedy=false; } : . )*
while True: #loop25
alt25 = 2
LA25_0 = self.input.LA(1)
if (LA25_0 == 42) :
LA25_1 = self.input.LA(2)
if (LA25_1 == 47) :
alt25 = 2
elif ((0 <= LA25_1 <= 46) or (48 <= LA25_1 <= 65535)) :
alt25 = 1
elif ((0 <= LA25_0 <= 41) or (43 <= LA25_0 <= 65535)) :
alt25 = 1
if alt25 == 1:
# Java.g:1090:42: .
pass
self.matchAny()
else:
break #loop25
self.match("*/")
#action start
_channel = HIDDEN;
#action end
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "COMMENT"
# $ANTLR start "LINE_COMMENT"
def mLINE_COMMENT(self, ):
try:
_type = LINE_COMMENT
_channel = DEFAULT_CHANNEL
# Java.g:1097:5: ( '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n' )
# Java.g:1097:7: '//' (~ ( '\\n' | '\\r' ) )* ( '\\r' )? '\\n'
pass
self.match("//")
# Java.g:1097:12: (~ ( '\\n' | '\\r' ) )*
while True: #loop26
alt26 = 2
LA26_0 = self.input.LA(1)
if ((0 <= LA26_0 <= 9) or (11 <= LA26_0 <= 12) or (14 <= LA26_0 <= 65535)) :
alt26 = 1
if alt26 == 1:
# Java.g:
pass
if (0 <= self.input.LA(1) <= 9) or (11 <= self.input.LA(1) <= 12) or (14 <= self.input.LA(1) <= 65535):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
break #loop26
# Java.g:1097:26: ( '\\r' )?
alt27 = 2
LA27_0 = self.input.LA(1)
if (LA27_0 == 13) :
alt27 = 1
if alt27 == 1:
# Java.g:1097:26: '\\r'
pass
self.match(13)
self.match(10)
#action start
_channel = HIDDEN;
#action end
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "LINE_COMMENT"
def mTokens(self):
# Java.g:1:8: ( ABSTRACT | AND | AND_ASSIGN | ASSERT | ASSIGN | AT | BIT_SHIFT_RIGHT | BIT_SHIFT_RIGHT_ASSIGN | BOOLEAN | BREAK | BYTE | CASE | CATCH | CHAR | CLASS | COLON | COMMA | CONTINUE | DEC | DEFAULT | DIV | DIV_ASSIGN | DO | DOT | DOTSTAR | DOUBLE | ELLIPSIS | ELSE | ENUM | EQUAL | EXTENDS | FALSE | FINAL | FINALLY | FLOAT | FOR | GREATER_OR_EQUAL | GREATER_THAN | IF | IMPLEMENTS | IMPORT | INC | INSTANCEOF | INT | INTERFACE | LBRACK | LCURLY | LESS_OR_EQUAL | LESS_THAN | LOGICAL_AND | LOGICAL_NOT | LOGICAL_OR | LONG | LPAREN | MINUS | MINUS_ASSIGN | MOD | MOD_ASSIGN | NATIVE | NEW | NOT | NOT_EQUAL | NULL | OR | OR_ASSIGN | PACKAGE | PLUS | PLUS_ASSIGN | PRIVATE | PROTECTED | PUBLIC | QUESTION | RBRACK | RCURLY | RETURN | RPAREN | SEMI | SHIFT_LEFT | SHIFT_LEFT_ASSIGN | SHIFT_RIGHT | SHIFT_RIGHT_ASSIGN | SHORT | STAR | STAR_ASSIGN | STATIC | STRICTFP | SUPER | SWITCH | SYNCHRONIZED | THIS | THROW | THROWS | TRANSIENT | TRUE | TRY | VOID | VOLATILE | WHILE | XOR | XOR_ASSIGN | HEX_LITERAL | DECIMAL_LITERAL | OCTAL_LITERAL | FLOATING_POINT_LITERAL | CHARACTER_LITERAL | STRING_LITERAL | IDENT | WS | COMMENT | LINE_COMMENT )
alt28 = 110
alt28 = self.dfa28.predict(self.input)
if alt28 == 1:
# Java.g:1:10: ABSTRACT
pass
self.mABSTRACT()
elif alt28 == 2:
# Java.g:1:19: AND
pass
self.mAND()
elif alt28 == 3:
# Java.g:1:23: AND_ASSIGN
pass
self.mAND_ASSIGN()
elif alt28 == 4:
# Java.g:1:34: ASSERT
pass
self.mASSERT()
elif alt28 == 5:
# Java.g:1:41: ASSIGN
pass
self.mASSIGN()
elif alt28 == 6:
# Java.g:1:48: AT
pass
self.mAT()
elif alt28 == 7:
# Java.g:1:51: BIT_SHIFT_RIGHT
pass
self.mBIT_SHIFT_RIGHT()
elif alt28 == 8:
# Java.g:1:67: BIT_SHIFT_RIGHT_ASSIGN
pass
self.mBIT_SHIFT_RIGHT_ASSIGN()
elif alt28 == 9:
# Java.g:1:90: BOOLEAN
pass
self.mBOOLEAN()
elif alt28 == 10:
# Java.g:1:98: BREAK
pass
self.mBREAK()
elif alt28 == 11:
# Java.g:1:104: BYTE
pass
self.mBYTE()
elif alt28 == 12:
# Java.g:1:109: CASE
pass
self.mCASE()
elif alt28 == 13:
# Java.g:1:114: CATCH
pass
self.mCATCH()
elif alt28 == 14:
# Java.g:1:120: CHAR
pass
self.mCHAR()
elif alt28 == 15:
# Java.g:1:125: CLASS
pass
self.mCLASS()
elif alt28 == 16:
# Java.g:1:131: COLON
pass
self.mCOLON()
elif alt28 == 17:
# Java.g:1:137: COMMA
pass
self.mCOMMA()
elif alt28 == 18:
# Java.g:1:143: CONTINUE
pass
self.mCONTINUE()
elif alt28 == 19:
# Java.g:1:152: DEC
pass
self.mDEC()
elif alt28 == 20:
# Java.g:1:156: DEFAULT
pass
self.mDEFAULT()
elif alt28 == 21:
# Java.g:1:164: DIV
pass
self.mDIV()
elif alt28 == 22:
# Java.g:1:168: DIV_ASSIGN
pass
self.mDIV_ASSIGN()
elif alt28 == 23:
# Java.g:1:179: DO
pass
self.mDO()
elif alt28 == 24:
# Java.g:1:182: DOT
pass
self.mDOT()
elif alt28 == 25:
# Java.g:1:186: DOTSTAR
pass
self.mDOTSTAR()
elif alt28 == 26:
# Java.g:1:194: DOUBLE
pass
self.mDOUBLE()
elif alt28 == 27:
# Java.g:1:201: ELLIPSIS
pass
self.mELLIPSIS()
elif alt28 == 28:
# Java.g:1:210: ELSE
pass
self.mELSE()
elif alt28 == 29:
# Java.g:1:215: ENUM
pass
self.mENUM()
elif alt28 == 30:
# Java.g:1:220: EQUAL
pass
self.mEQUAL()
elif alt28 == 31:
# Java.g:1:226: EXTENDS
pass
self.mEXTENDS()
elif alt28 == 32:
# Java.g:1:234: FALSE
pass
self.mFALSE()
elif alt28 == 33:
# Java.g:1:240: FINAL
pass
self.mFINAL()
elif alt28 == 34:
# Java.g:1:246: FINALLY
pass
self.mFINALLY()
elif alt28 == 35:
# Java.g:1:254: FLOAT
pass
self.mFLOAT()
elif alt28 == 36:
# Java.g:1:260: FOR
pass
self.mFOR()
elif alt28 == 37:
# Java.g:1:264: GREATER_OR_EQUAL
pass
self.mGREATER_OR_EQUAL()
elif alt28 == 38:
# Java.g:1:281: GREATER_THAN
pass
self.mGREATER_THAN()
elif alt28 == 39:
# Java.g:1:294: IF
pass
self.mIF()
elif alt28 == 40:
# Java.g:1:297: IMPLEMENTS
pass
self.mIMPLEMENTS()
elif alt28 == 41:
# Java.g:1:308: IMPORT
pass
self.mIMPORT()
elif alt28 == 42:
# Java.g:1:315: INC
pass
self.mINC()
elif alt28 == 43:
# Java.g:1:319: INSTANCEOF
pass
self.mINSTANCEOF()
elif alt28 == 44:
# Java.g:1:330: INT
pass
self.mINT()
elif alt28 == 45:
# Java.g:1:334: INTERFACE
pass
self.mINTERFACE()
elif alt28 == 46:
# Java.g:1:344: LBRACK
pass
self.mLBRACK()
elif alt28 == 47:
# Java.g:1:351: LCURLY
pass
self.mLCURLY()
elif alt28 == 48:
# Java.g:1:358: LESS_OR_EQUAL
pass
self.mLESS_OR_EQUAL()
elif alt28 == 49:
# Java.g:1:372: LESS_THAN
pass
self.mLESS_THAN()
elif alt28 == 50:
# Java.g:1:382: LOGICAL_AND
pass
self.mLOGICAL_AND()
elif alt28 == 51:
# Java.g:1:394: LOGICAL_NOT
pass
self.mLOGICAL_NOT()
elif alt28 == 52:
# Java.g:1:406: LOGICAL_OR
pass
self.mLOGICAL_OR()
elif alt28 == 53:
# Java.g:1:417: LONG
pass
self.mLONG()
elif alt28 == 54:
# Java.g:1:422: LPAREN
pass
self.mLPAREN()
elif alt28 == 55:
# Java.g:1:429: MINUS
pass
self.mMINUS()
elif alt28 == 56:
# Java.g:1:435: MINUS_ASSIGN
pass
self.mMINUS_ASSIGN()
elif alt28 == 57:
# Java.g:1:448: MOD
pass
self.mMOD()
elif alt28 == 58:
# Java.g:1:452: MOD_ASSIGN
pass
self.mMOD_ASSIGN()
elif alt28 == 59:
# Java.g:1:463: NATIVE
pass
self.mNATIVE()
elif alt28 == 60:
# Java.g:1:470: NEW
pass
self.mNEW()
elif alt28 == 61:
# Java.g:1:474: NOT
pass
self.mNOT()
elif alt28 == 62:
# Java.g:1:478: NOT_EQUAL
pass
self.mNOT_EQUAL()
elif alt28 == 63:
# Java.g:1:488: NULL
pass
self.mNULL()
elif alt28 == 64:
# Java.g:1:493: OR
pass
self.mOR()
elif alt28 == 65:
# Java.g:1:496: OR_ASSIGN
pass
self.mOR_ASSIGN()
elif alt28 == 66:
# Java.g:1:506: PACKAGE
pass
self.mPACKAGE()
elif alt28 == 67:
# Java.g:1:514: PLUS
pass
self.mPLUS()
elif alt28 == 68:
# Java.g:1:519: PLUS_ASSIGN
pass
self.mPLUS_ASSIGN()
elif alt28 == 69:
# Java.g:1:531: PRIVATE
pass
self.mPRIVATE()
elif alt28 == 70:
# Java.g:1:539: PROTECTED
pass
self.mPROTECTED()
elif alt28 == 71:
# Java.g:1:549: PUBLIC
pass
self.mPUBLIC()
elif alt28 == 72:
# Java.g:1:556: QUESTION
pass
self.mQUESTION()
elif alt28 == 73:
# Java.g:1:565: RBRACK
pass
self.mRBRACK()
elif alt28 == 74:
# Java.g:1:572: RCURLY
pass
self.mRCURLY()
elif alt28 == 75:
# Java.g:1:579: RETURN
pass
self.mRETURN()
elif alt28 == 76:
# Java.g:1:586: RPAREN
pass
self.mRPAREN()
elif alt28 == 77:
# Java.g:1:593: SEMI
pass
self.mSEMI()
elif alt28 == 78:
# Java.g:1:598: SHIFT_LEFT
pass
self.mSHIFT_LEFT()
elif alt28 == 79:
# Java.g:1:609: SHIFT_LEFT_ASSIGN
pass
self.mSHIFT_LEFT_ASSIGN()
elif alt28 == 80:
# Java.g:1:627: SHIFT_RIGHT
pass
self.mSHIFT_RIGHT()
elif alt28 == 81:
# Java.g:1:639: SHIFT_RIGHT_ASSIGN
pass
self.mSHIFT_RIGHT_ASSIGN()
elif alt28 == 82:
# Java.g:1:658: SHORT
pass
self.mSHORT()
elif alt28 == 83:
# Java.g:1:664: STAR
pass
self.mSTAR()
elif alt28 == 84:
# Java.g:1:669: STAR_ASSIGN
pass
self.mSTAR_ASSIGN()
elif alt28 == 85:
# Java.g:1:681: STATIC
pass
self.mSTATIC()
elif alt28 == 86:
# Java.g:1:688: STRICTFP
pass
self.mSTRICTFP()
elif alt28 == 87:
# Java.g:1:697: SUPER
pass
self.mSUPER()
elif alt28 == 88:
# Java.g:1:703: SWITCH
pass
self.mSWITCH()
elif alt28 == 89:
# Java.g:1:710: SYNCHRONIZED
pass
self.mSYNCHRONIZED()
elif alt28 == 90:
# Java.g:1:723: THIS
pass
self.mTHIS()
elif alt28 == 91:
# Java.g:1:728: THROW
pass
self.mTHROW()
elif alt28 == 92:
# Java.g:1:734: THROWS
pass
self.mTHROWS()
elif alt28 == 93:
# Java.g:1:741: TRANSIENT
pass
self.mTRANSIENT()
elif alt28 == 94:
# Java.g:1:751: TRUE
pass
self.mTRUE()
elif alt28 == 95:
# Java.g:1:756: TRY
pass
self.mTRY()
elif alt28 == 96:
# Java.g:1:760: VOID
pass
self.mVOID()
elif alt28 == 97:
# Java.g:1:765: VOLATILE
pass
self.mVOLATILE()
elif alt28 == 98:
# Java.g:1:774: WHILE
pass
self.mWHILE()
elif alt28 == 99:
# Java.g:1:780: XOR
pass
self.mXOR()
elif alt28 == 100:
# Java.g:1:784: XOR_ASSIGN
pass
self.mXOR_ASSIGN()
elif alt28 == 101:
# Java.g:1:795: HEX_LITERAL
pass
self.mHEX_LITERAL()
elif alt28 == 102:
# Java.g:1:807: DECIMAL_LITERAL
pass
self.mDECIMAL_LITERAL()
elif alt28 == 103:
# Java.g:1:823: OCTAL_LITERAL
pass
self.mOCTAL_LITERAL()
elif alt28 == 104:
# Java.g:1:837: FLOATING_POINT_LITERAL
pass
self.mFLOATING_POINT_LITERAL()
elif alt28 == 105:
# Java.g:1:860: CHARACTER_LITERAL
pass
self.mCHARACTER_LITERAL()
elif alt28 == 106:
# Java.g:1:878: STRING_LITERAL
pass
self.mSTRING_LITERAL()
elif alt28 == 107:
# Java.g:1:893: IDENT
pass
self.mIDENT()
elif alt28 == 108:
# Java.g:1:899: WS
pass
self.mWS()
elif alt28 == 109:
# Java.g:1:902: COMMENT
pass
self.mCOMMENT()
elif alt28 == 110:
# Java.g:1:910: LINE_COMMENT
pass
self.mLINE_COMMENT()
# lookup tables for DFA #28
DFA28_eot = DFA.unpack(
u"\1\uffff\1\55\1\63\1\65\1\uffff\1\70\2\55\2\uffff\1\102\1\55\1"
u"\110\1\113\3\55\1\131\2\uffff\1\134\1\136\1\141\1\55\1\uffff\1"
u"\144\1\55\1\uffff\1\55\3\uffff\1\55\2\uffff\1\55\1\162\3\55\1\170"
u"\2\172\4\uffff\2\55\5\uffff\1\u0081\2\uffff\7\55\3\uffff\1\55\1"
u"\u008c\10\uffff\7\55\1\u0094\2\55\4\uffff\1\u0099\6\uffff\1\55"
u"\2\uffff\14\55\2\uffff\4\55\4\uffff\1\u00b1\1\172\2\55\1\u00b5"
u"\2\uffff\12\55\1\uffff\6\55\1\u00c6\1\uffff\2\55\1\u00cb\2\uffff"
u"\2\55\1\u00ce\20\55\1\u00df\3\55\1\uffff\2\55\2\uffff\2\55\1\u00e7"
u"\1\u00e8\1\55\1\u00ea\4\55\1\u00ef\1\u00f0\4\55\1\uffff\4\55\1"
u"\uffff\1\u00f9\1\55\1\uffff\1\u00fb\13\55\1\u0107\2\55\1\u010a"
u"\1\uffff\1\u010b\5\55\1\u0111\2\uffff\1\u0112\1\uffff\1\u0113\3"
u"\55\2\uffff\1\55\1\u0118\1\u011a\1\u011b\4\55\1\uffff\1\55\1\uffff"
u"\5\55\1\u0126\2\55\1\u0129\2\55\1\uffff\1\u012d\1\55\2\uffff\1"
u"\55\1\u0130\1\55\1\u0132\1\55\3\uffff\2\55\1\u0136\1\55\1\uffff"
u"\1\55\2\uffff\1\55\1\u013a\2\55\1\u013d\3\55\1\u0141\1\u0142\1"
u"\uffff\1\u0143\1\55\1\uffff\1\u0145\1\55\1\u0147\1\uffff\2\55\1"
u"\uffff\1\55\1\uffff\1\u014b\1\55\1\u014d\1\uffff\1\u014e\1\u014f"
u"\1\55\1\uffff\2\55\1\uffff\1\u0153\1\u0154\1\55\3\uffff\1\55\1"
u"\uffff\1\55\1\uffff\2\55\1\u015a\1\uffff\1\u015b\3\uffff\3\55\2"
u"\uffff\1\55\1\u0160\2\55\1\u0163\2\uffff\2\55\1\u0166\1\u0167\1"
u"\uffff\1\55\1\u0169\1\uffff\1\u016a\1\u016b\2\uffff\1\55\3\uffff"
u"\1\55\1\u016e\1\uffff"
)
DFA28_eof = DFA.unpack(
u"\u016f\uffff"
)
DFA28_min = DFA.unpack(
u"\1\11\1\142\1\46\1\75\1\uffff\1\75\1\157\1\141\2\uffff\1\55\1\145"
u"\2\52\1\154\1\141\1\146\1\53\2\uffff\1\74\2\75\1\157\1\uffff\1"
u"\75\1\141\1\uffff\1\141\3\uffff\1\145\2\uffff\1\150\1\75\1\150"
u"\1\157\1\150\1\75\2\56\4\uffff\2\163\5\uffff\1\75\2\uffff\1\157"
u"\1\145\1\164\1\163\2\141\1\156\3\uffff\1\146\1\44\10\uffff\1\163"
u"\1\165\1\164\1\154\1\156\1\157\1\162\1\44\1\160\1\163\4\uffff\1"
u"\75\6\uffff\1\156\2\uffff\1\164\1\167\1\154\1\143\1\151\1\142\1"
u"\164\1\157\1\141\1\160\1\151\1\156\2\uffff\1\151\1\141\2\151\4"
u"\uffff\2\56\1\164\1\145\1\75\2\uffff\1\154\1\141\2\145\1\143\1"
u"\162\1\163\1\164\1\141\1\142\1\uffff\1\145\1\155\1\145\1\163\2"
u"\141\1\44\1\uffff\1\154\1\164\1\44\2\uffff\1\147\1\151\1\44\1\154"
u"\1\153\1\166\1\164\1\154\1\165\1\162\1\164\1\151\1\145\1\164\1"
u"\143\1\163\1\157\1\156\1\145\1\44\1\144\1\141\1\154\1\uffff\2\162"
u"\2\uffff\1\145\1\153\2\44\1\150\1\44\1\163\1\151\1\165\1\154\2"
u"\44\1\156\1\145\1\154\1\164\1\uffff\1\145\1\162\1\141\1\162\1\uffff"
u"\1\44\1\166\1\uffff\1\44\2\141\1\145\1\151\1\162\1\164\1\151\1"
u"\143\1\162\1\143\1\150\1\44\1\167\1\163\1\44\1\uffff\1\44\1\164"
u"\1\145\1\141\1\164\1\141\1\44\2\uffff\1\44\1\uffff\1\44\1\156\1"
u"\154\1\145\2\uffff\1\144\3\44\1\155\1\164\1\156\1\146\1\uffff\1"
u"\145\1\uffff\1\147\1\164\2\143\1\156\1\44\1\143\1\164\1\44\1\150"
u"\1\162\1\uffff\1\44\1\151\2\uffff\1\151\1\44\1\143\1\44\1\156\3"
u"\uffff\1\165\1\164\1\44\1\163\1\uffff\1\171\2\uffff\1\145\1\44"
u"\1\143\1\141\1\44\2\145\1\164\2\44\1\uffff\1\44\1\146\1\uffff\1"
u"\44\1\157\1\44\1\uffff\1\145\1\154\1\uffff\1\164\1\uffff\1\44\1"
u"\145\1\44\1\uffff\2\44\1\156\1\uffff\1\145\1\143\1\uffff\2\44\1"
u"\145\3\uffff\1\160\1\uffff\1\156\1\uffff\1\156\1\145\1\44\1\uffff"
u"\1\44\3\uffff\1\164\1\157\1\145\2\uffff\1\144\1\44\1\151\1\164"
u"\1\44\2\uffff\1\163\1\146\2\44\1\uffff\1\172\1\44\1\uffff\2\44"
u"\2\uffff\1\145\3\uffff\1\144\1\44\1\uffff"
)
DFA28_max = DFA.unpack(
u"\1\ufaff\1\163\2\75\1\uffff\1\76\1\171\1\157\2\uffff\1\75\1\157"
u"\1\75\1\71\1\170\1\157\1\156\1\75\2\uffff\2\75\1\174\1\157\1\uffff"
u"\1\75\1\165\1\uffff\1\165\3\uffff\1\145\2\uffff\1\171\1\75\1\162"
u"\1\157\1\150\1\75\1\170\1\146\4\uffff\2\163\5\uffff\1\76\2\uffff"
u"\1\157\1\145\2\164\2\141\1\156\3\uffff\1\146\1\ufaff\10\uffff\1"
u"\163\1\165\1\164\1\154\1\156\1\157\1\162\1\ufaff\1\160\1\164\4"
u"\uffff\1\75\6\uffff\1\156\2\uffff\1\164\1\167\1\154\1\143\1\157"
u"\1\142\1\164\1\157\1\162\1\160\1\151\1\156\2\uffff\1\162\1\171"
u"\1\154\1\151\4\uffff\2\146\1\164\1\145\1\75\2\uffff\1\154\1\141"
u"\2\145\1\143\1\162\1\163\1\164\1\141\1\142\1\uffff\1\145\1\155"
u"\1\145\1\163\2\141\1\ufaff\1\uffff\1\157\1\164\1\ufaff\2\uffff"
u"\1\147\1\151\1\ufaff\1\154\1\153\1\166\1\164\1\154\1\165\1\162"
u"\1\164\1\151\1\145\1\164\1\143\1\163\1\157\1\156\1\145\1\ufaff"
u"\1\144\1\141\1\154\1\uffff\2\162\2\uffff\1\145\1\153\2\ufaff\1"
u"\150\1\ufaff\1\163\1\151\1\165\1\154\2\ufaff\1\156\1\145\1\154"
u"\1\164\1\uffff\1\145\1\162\1\141\1\162\1\uffff\1\ufaff\1\166\1"
u"\uffff\1\ufaff\2\141\1\145\1\151\1\162\1\164\1\151\1\143\1\162"
u"\1\143\1\150\1\ufaff\1\167\1\163\1\ufaff\1\uffff\1\ufaff\1\164"
u"\1\145\1\141\1\164\1\141\1\ufaff\2\uffff\1\ufaff\1\uffff\1\ufaff"
u"\1\156\1\154\1\145\2\uffff\1\144\3\ufaff\1\155\1\164\1\156\1\146"
u"\1\uffff\1\145\1\uffff\1\147\1\164\2\143\1\156\1\ufaff\1\143\1"
u"\164\1\ufaff\1\150\1\162\1\uffff\1\ufaff\1\151\2\uffff\1\151\1"
u"\ufaff\1\143\1\ufaff\1\156\3\uffff\1\165\1\164\1\ufaff\1\163\1"
u"\uffff\1\171\2\uffff\1\145\1\ufaff\1\143\1\141\1\ufaff\2\145\1"
u"\164\2\ufaff\1\uffff\1\ufaff\1\146\1\uffff\1\ufaff\1\157\1\ufaff"
u"\1\uffff\1\145\1\154\1\uffff\1\164\1\uffff\1\ufaff\1\145\1\ufaff"
u"\1\uffff\2\ufaff\1\156\1\uffff\1\145\1\143\1\uffff\2\ufaff\1\145"
u"\3\uffff\1\160\1\uffff\1\156\1\uffff\1\156\1\145\1\ufaff\1\uffff"
u"\1\ufaff\3\uffff\1\164\1\157\1\145\2\uffff\1\144\1\ufaff\1\151"
u"\1\164\1\ufaff\2\uffff\1\163\1\146\2\ufaff\1\uffff\1\172\1\ufaff"
u"\1\uffff\2\ufaff\2\uffff\1\145\3\uffff\1\144\1\ufaff\1\uffff"
)
DFA28_accept = DFA.unpack(
u"\4\uffff\1\6\3\uffff\1\20\1\21\10\uffff\1\56\1\57\4\uffff\1\66"
u"\2\uffff\1\75\1\uffff\1\110\1\111\1\112\1\uffff\1\114\1\115\10"
u"\uffff\1\151\1\152\1\153\1\154\2\uffff\1\3\1\62\1\2\1\36\1\5\1"
u"\uffff\1\45\1\46\7\uffff\1\23\1\70\1\67\2\uffff\1\26\1\155\1\156"
u"\1\25\1\31\1\33\1\30\1\150\12\uffff\1\52\1\104\1\103\1\60\1\uffff"
u"\1\61\1\76\1\63\1\64\1\101\1\100\1\uffff\1\72\1\71\14\uffff\1\124"
u"\1\123\4\uffff\1\144\1\143\1\145\1\146\5\uffff\1\121\1\120\12\uffff"
u"\1\27\7\uffff\1\47\3\uffff\1\117\1\116\27\uffff\1\147\2\uffff\1"
u"\10\1\7\20\uffff\1\44\4\uffff\1\54\2\uffff\1\74\20\uffff\1\137"
u"\7\uffff\1\13\1\14\1\uffff\1\16\4\uffff\1\34\1\35\10\uffff\1\65"
u"\1\uffff\1\77\13\uffff\1\132\2\uffff\1\136\1\140\5\uffff\1\12\1"
u"\15\1\17\4\uffff\1\40\1\uffff\1\41\1\43\12\uffff\1\122\2\uffff"
u"\1\127\3\uffff\1\133\2\uffff\1\142\1\uffff\1\4\3\uffff\1\32\3\uffff"
u"\1\51\2\uffff\1\73\3\uffff\1\107\1\113\1\125\1\uffff\1\130\1\uffff"
u"\1\134\3\uffff\1\11\1\uffff\1\24\1\37\1\42\3\uffff\1\102\1\105"
u"\5\uffff\1\1\1\22\4\uffff\1\126\2\uffff\1\141\2\uffff\1\55\1\106"
u"\1\uffff\1\135\1\50\1\53\2\uffff\1\131"
)
DFA28_special = DFA.unpack(
u"\u016f\uffff"
)
DFA28_transition = [
DFA.unpack(u"\2\56\1\uffff\2\56\22\uffff\1\56\1\25\1\54\1\uffff\1"
u"\55\1\31\1\2\1\53\1\30\1\41\1\44\1\21\1\11\1\12\1\15\1\14\1\51"
u"\11\52\1\10\1\42\1\24\1\3\1\5\1\35\1\4\32\55\1\22\1\uffff\1\36"
u"\1\50\1\55\1\uffff\1\1\1\6\1\7\1\13\1\16\1\17\2\55\1\20\2\55\1"
u"\27\1\55\1\32\1\55\1\34\1\55\1\40\1\43\1\45\1\55\1\46\1\47\3\55"
u"\1\23\1\26\1\37\1\33\101\uffff\27\55\1\uffff\37\55\1\uffff\u1f08"
u"\55\u1040\uffff\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e"
u"\55\u10d2\uffff\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\57\20\uffff\1\60"),
DFA.unpack(u"\1\62\26\uffff\1\61"),
DFA.unpack(u"\1\64"),
DFA.unpack(u""),
DFA.unpack(u"\1\67\1\66"),
DFA.unpack(u"\1\71\2\uffff\1\72\6\uffff\1\73"),
DFA.unpack(u"\1\74\6\uffff\1\75\3\uffff\1\76\2\uffff\1\77"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\100\17\uffff\1\101"),
DFA.unpack(u"\1\103\11\uffff\1\104"),
DFA.unpack(u"\1\106\4\uffff\1\107\15\uffff\1\105"),
DFA.unpack(u"\1\111\3\uffff\1\112\1\uffff\12\114"),
DFA.unpack(u"\1\115\1\uffff\1\116\11\uffff\1\117"),
DFA.unpack(u"\1\120\7\uffff\1\121\2\uffff\1\122\2\uffff\1\123"),
DFA.unpack(u"\1\124\6\uffff\1\125\1\126"),
DFA.unpack(u"\1\127\21\uffff\1\130"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\133\1\132"),
DFA.unpack(u"\1\135"),
DFA.unpack(u"\1\140\76\uffff\1\137"),
DFA.unpack(u"\1\142"),
DFA.unpack(u""),
DFA.unpack(u"\1\143"),
DFA.unpack(u"\1\145\3\uffff\1\146\17\uffff\1\147"),
DFA.unpack(u""),
DFA.unpack(u"\1\150\20\uffff\1\151\2\uffff\1\152"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\153"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\154\13\uffff\1\155\1\156\1\uffff\1\157\1\uffff\1"
u"\160"),
DFA.unpack(u"\1\161"),
DFA.unpack(u"\1\163\11\uffff\1\164"),
DFA.unpack(u"\1\165"),
DFA.unpack(u"\1\166"),
DFA.unpack(u"\1\167"),
DFA.unpack(u"\1\114\1\uffff\10\173\2\114\12\uffff\3\114\21\uffff"
u"\1\171\13\uffff\3\114\21\uffff\1\171"),
DFA.unpack(u"\1\114\1\uffff\12\174\12\uffff\3\114\35\uffff\3\114"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\175"),
DFA.unpack(u"\1\176"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u0080\1\177"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u0082"),
DFA.unpack(u"\1\u0083"),
DFA.unpack(u"\1\u0084"),
DFA.unpack(u"\1\u0085\1\u0086"),
DFA.unpack(u"\1\u0087"),
DFA.unpack(u"\1\u0088"),
DFA.unpack(u"\1\u0089"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u008a"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\24\55\1\u008b\5\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08"
u"\55\u1040\uffff\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e"
u"\55\u10d2\uffff\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u008d"),
DFA.unpack(u"\1\u008e"),
DFA.unpack(u"\1\u008f"),
DFA.unpack(u"\1\u0090"),
DFA.unpack(u"\1\u0091"),
DFA.unpack(u"\1\u0092"),
DFA.unpack(u"\1\u0093"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u0095"),
DFA.unpack(u"\1\u0096\1\u0097"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u0098"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u009a"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u009b"),
DFA.unpack(u"\1\u009c"),
DFA.unpack(u"\1\u009d"),
DFA.unpack(u"\1\u009e"),
DFA.unpack(u"\1\u009f\5\uffff\1\u00a0"),
DFA.unpack(u"\1\u00a1"),
DFA.unpack(u"\1\u00a2"),
DFA.unpack(u"\1\u00a3"),
DFA.unpack(u"\1\u00a4\20\uffff\1\u00a5"),
DFA.unpack(u"\1\u00a6"),
DFA.unpack(u"\1\u00a7"),
DFA.unpack(u"\1\u00a8"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u00a9\10\uffff\1\u00aa"),
DFA.unpack(u"\1\u00ab\23\uffff\1\u00ac\3\uffff\1\u00ad"),
DFA.unpack(u"\1\u00ae\2\uffff\1\u00af"),
DFA.unpack(u"\1\u00b0"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\114\1\uffff\10\173\2\114\12\uffff\3\114\35\uffff"
u"\3\114"),
DFA.unpack(u"\1\114\1\uffff\12\174\12\uffff\3\114\35\uffff\3\114"),
DFA.unpack(u"\1\u00b2"),
DFA.unpack(u"\1\u00b3"),
DFA.unpack(u"\1\u00b4"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u00b6"),
DFA.unpack(u"\1\u00b7"),
DFA.unpack(u"\1\u00b8"),
DFA.unpack(u"\1\u00b9"),
DFA.unpack(u"\1\u00ba"),
DFA.unpack(u"\1\u00bb"),
DFA.unpack(u"\1\u00bc"),
DFA.unpack(u"\1\u00bd"),
DFA.unpack(u"\1\u00be"),
DFA.unpack(u"\1\u00bf"),
DFA.unpack(u""),
DFA.unpack(u"\1\u00c0"),
DFA.unpack(u"\1\u00c1"),
DFA.unpack(u"\1\u00c2"),
DFA.unpack(u"\1\u00c3"),
DFA.unpack(u"\1\u00c4"),
DFA.unpack(u"\1\u00c5"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u"\1\u00c7\2\uffff\1\u00c8"),
DFA.unpack(u"\1\u00c9"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\4\55\1\u00ca\25\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08"
u"\55\u1040\uffff\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e"
u"\55\u10d2\uffff\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u00cc"),
DFA.unpack(u"\1\u00cd"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u00cf"),
DFA.unpack(u"\1\u00d0"),
DFA.unpack(u"\1\u00d1"),
DFA.unpack(u"\1\u00d2"),
DFA.unpack(u"\1\u00d3"),
DFA.unpack(u"\1\u00d4"),
DFA.unpack(u"\1\u00d5"),
DFA.unpack(u"\1\u00d6"),
DFA.unpack(u"\1\u00d7"),
DFA.unpack(u"\1\u00d8"),
DFA.unpack(u"\1\u00d9"),
DFA.unpack(u"\1\u00da"),
DFA.unpack(u"\1\u00db"),
DFA.unpack(u"\1\u00dc"),
DFA.unpack(u"\1\u00dd"),
DFA.unpack(u"\1\u00de"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u00e0"),
DFA.unpack(u"\1\u00e1"),
DFA.unpack(u"\1\u00e2"),
DFA.unpack(u""),
DFA.unpack(u"\1\u00e3"),
DFA.unpack(u"\1\u00e4"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u00e5"),
DFA.unpack(u"\1\u00e6"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u00e9"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u00eb"),
DFA.unpack(u"\1\u00ec"),
DFA.unpack(u"\1\u00ed"),
DFA.unpack(u"\1\u00ee"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u00f1"),
DFA.unpack(u"\1\u00f2"),
DFA.unpack(u"\1\u00f3"),
DFA.unpack(u"\1\u00f4"),
DFA.unpack(u""),
DFA.unpack(u"\1\u00f5"),
DFA.unpack(u"\1\u00f6"),
DFA.unpack(u"\1\u00f7"),
DFA.unpack(u"\1\u00f8"),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u00fa"),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u00fc"),
DFA.unpack(u"\1\u00fd"),
DFA.unpack(u"\1\u00fe"),
DFA.unpack(u"\1\u00ff"),
DFA.unpack(u"\1\u0100"),
DFA.unpack(u"\1\u0101"),
DFA.unpack(u"\1\u0102"),
DFA.unpack(u"\1\u0103"),
DFA.unpack(u"\1\u0104"),
DFA.unpack(u"\1\u0105"),
DFA.unpack(u"\1\u0106"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u0108"),
DFA.unpack(u"\1\u0109"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u010c"),
DFA.unpack(u"\1\u010d"),
DFA.unpack(u"\1\u010e"),
DFA.unpack(u"\1\u010f"),
DFA.unpack(u"\1\u0110"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u0114"),
DFA.unpack(u"\1\u0115"),
DFA.unpack(u"\1\u0116"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u0117"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\13\55\1\u0119\16\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08"
u"\55\u1040\uffff\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e"
u"\55\u10d2\uffff\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u011c"),
DFA.unpack(u"\1\u011d"),
DFA.unpack(u"\1\u011e"),
DFA.unpack(u"\1\u011f"),
DFA.unpack(u""),
DFA.unpack(u"\1\u0120"),
DFA.unpack(u""),
DFA.unpack(u"\1\u0121"),
DFA.unpack(u"\1\u0122"),
DFA.unpack(u"\1\u0123"),
DFA.unpack(u"\1\u0124"),
DFA.unpack(u"\1\u0125"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u0127"),
DFA.unpack(u"\1\u0128"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u012a"),
DFA.unpack(u"\1\u012b"),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\22\55\1\u012c\7\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08"
u"\55\u1040\uffff\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e"
u"\55\u10d2\uffff\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u012e"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u012f"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u0131"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u0133"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u0134"),
DFA.unpack(u"\1\u0135"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u0137"),
DFA.unpack(u""),
DFA.unpack(u"\1\u0138"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u0139"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u013b"),
DFA.unpack(u"\1\u013c"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u013e"),
DFA.unpack(u"\1\u013f"),
DFA.unpack(u"\1\u0140"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u0144"),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u0146"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u"\1\u0148"),
DFA.unpack(u"\1\u0149"),
DFA.unpack(u""),
DFA.unpack(u"\1\u014a"),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u014c"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u0150"),
DFA.unpack(u""),
DFA.unpack(u"\1\u0151"),
DFA.unpack(u"\1\u0152"),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u0155"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u0156"),
DFA.unpack(u""),
DFA.unpack(u"\1\u0157"),
DFA.unpack(u""),
DFA.unpack(u"\1\u0158"),
DFA.unpack(u"\1\u0159"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u015c"),
DFA.unpack(u"\1\u015d"),
DFA.unpack(u"\1\u015e"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u015f"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\u0161"),
DFA.unpack(u"\1\u0162"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u0164"),
DFA.unpack(u"\1\u0165"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u"\1\u0168"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u016c"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\u016d"),
DFA.unpack(u"\1\55\13\uffff\12\55\7\uffff\32\55\4\uffff\1\55\1\uffff"
u"\32\55\105\uffff\27\55\1\uffff\37\55\1\uffff\u1f08\55\u1040\uffff"
u"\u0150\55\u0170\uffff\u0080\55\u0080\uffff\u092e\55\u10d2\uffff"
u"\u5200\55\u5900\uffff\u0200\55"),
DFA.unpack(u"")
]
# class definition for DFA #28
class DFA28(DFA):
    """ANTLR-generated DFA subclass for lexer decision #28.

    The transition tables are supplied externally (the ``DFA.unpack`` blobs
    above); this subclass adds no behavior of its own.
    """
    pass
def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
    """Run the generated JavaLexer from the command line.

    Delegates to ANTLR's ``LexerMain`` driver, wiring the given standard
    streams onto it before executing with *argv*.
    """
    from antlr3.main import LexerMain
    driver = LexerMain(JavaLexer)
    driver.stdin, driver.stdout, driver.stderr = stdin, stdout, stderr
    driver.execute(argv)
# Allow invoking the generated lexer module directly as a script.
if __name__ == '__main__':
    main(sys.argv)
| [
"[email protected]"
] | |
b38a41816cd1d881bbd42a2e2799dc9ff0537b61 | efe60e67d113ec19b0528c005597768e22c120b1 | /development/pdf_to_tiff/__init__.py | aa63407dc7c5fdf2e7951cf6b3ae3b6e06608ede | [
"MIT"
] | permissive | FujinamiRyohei/pdf_to_tiff | ef232b6da17b6c66e4ab490287ed75744e7bf138 | 11489dca943cc1f0c9dd99dc99599e2235473893 | refs/heads/master | 2022-07-13T11:39:22.562193 | 2020-05-11T09:23:59 | 2020-05-11T09:23:59 | 262,969,635 | 0 | 0 | MIT | 2020-05-11T09:24:00 | 2020-05-11T07:25:20 | null | UTF-8 | Python | false | false | 744 | py | from pathlib import Path
from pdf2image import convert_from_path
def convert(input_path, output_path=""):
path = Path(input_path)
if not path.exists():
raise Exception(f"Input file {path} does not exist.")
images = convert_from_path(str(path))
if not output_path:
output_path = path.with_suffix(".tif")
else:
output_path = Path(output_path)
if not output_path.parent.exists():
raise Exception(f"Output file location {output_path.parent} does not exist.")
images = [i.convert("L") for i in images] # to grayscale
images[0].save(
str(output_path),
compression="tiff_deflate",
save_all=True,
append_images=images[1:])
return output_path
| [
"[email protected]"
] | |
d869b8c796aab6c8f1f0aa9bf014786a884aa1c4 | ce5f9a80a06662a6e27dc455795b302af49c8946 | /passwordsecure/migrations/0001_initial.py | f7bba0be7b72f159c969b5096799449855615a41 | [] | no_license | amra0760/mysite | 819d572ce2b4cb150e02e29cfdc5b28f5e55ace3 | 8bc14dcbad04d71137bce5dc2f00178df26cf0aa | refs/heads/master | 2020-03-19T03:24:29.986352 | 2018-06-01T14:05:08 | 2018-06-01T14:05:08 | 135,725,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | # Generated by Django 2.0.5 on 2018-05-30 08:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Website`` table for stored credentials."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Website',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): the password is declared as a plain CharField —
                # confirm it is hashed/encrypted before save elsewhere in the app.
                ('email', models.CharField(max_length=1000)),
                ('password', models.CharField(max_length=1000)),
                ('site_name', models.CharField(max_length=1000)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
de2d839b60a73ddb17f56fec860dc5252ab08a73 | a3bfccd69d92d29ee59ea60648e8405a746f7e90 | /pytorch/pytorchcv/models/resnet.py | 4c1470080e6caeeea23a24b7c6f159bcb1c88938 | [
"MIT"
] | permissive | semchan/imgclsmob | c007ba7c9effa66f6f6d3144f8ce1ef86d5e74aa | 9c43171207296ac420c887b3429a98c86c351992 | refs/heads/master | 2020-05-22T02:25:17.695096 | 2019-05-11T09:43:19 | 2019-05-11T09:43:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,139 | py | """
ResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2',
'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnet50', 'resnet50b', 'resnet101',
'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'ResBlock', 'ResBottleneck', 'ResUnit',
'ResInitBlock']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block, conv7x7_block
class ResBlock(nn.Module):
    """
    Plain two-convolution residual branch used by non-bottleneck ResNet units.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride):
        super(ResBlock, self).__init__()
        # The first 3x3 conv carries the stride; the second keeps resolution
        # and skips its activation so the unit can add the identity first.
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride)
        self.conv2 = conv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            activation=None,
            activate=False)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class ResBottleneck(nn.Module):
    """
    Bottleneck residual branch (1x1 reduce -> 3x3 -> 1x1 expand) for ResNet units.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 padding=1,
                 dilation=1,
                 conv1_stride=False,
                 bottleneck_factor=4):
        super(ResBottleneck, self).__init__()
        mid_channels = out_channels // bottleneck_factor
        # The stride is applied by exactly one of the first two convs.
        stride1, stride2 = (stride, 1) if conv1_stride else (1, stride)

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            stride=stride1)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            stride=stride2,
            padding=padding,
            dilation=dilation)
        # Final 1x1 expansion has no activation: the ReLU is applied by the
        # enclosing unit after the residual addition.
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            activate=False)

    def forward(self, x):
        return self.conv3(self.conv2(self.conv1(x)))
class ResUnit(nn.Module):
    """
    ResNet unit: residual branch plus (optionally projected) identity shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer in bottleneck.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer in bottleneck.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 padding=1,
                 dilation=1,
                 bottleneck=True,
                 conv1_stride=False):
        super(ResUnit, self).__init__()
        # A 1x1 projection is needed whenever the shortcut must change
        # channel count or spatial resolution.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)

        if bottleneck:
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                padding=padding,
                dilation=dilation,
                conv1_stride=conv1_stride)
        else:
            self.body = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None,
                activate=False)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        return self.activ(self.body(x) + identity)
class ResInitBlock(nn.Module):
    """
    ResNet stem: strided 7x7 convolution followed by 3x3 max-pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(ResInitBlock, self).__init__()
        self.conv = conv7x7_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=2)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        return self.pool(self.conv(x))
class ResNet(nn.Module):
    """
    ResNet model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit, grouped by stage.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(ResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module("init_block", ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, unit_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage after the first.
                unit_stride = 1 if (stage_idx == 0) or (unit_idx != 0) else 2
                stage.add_module("unit{}".format(unit_idx + 1), ResUnit(
                    in_channels=in_channels,
                    out_channels=unit_channels,
                    stride=unit_stride,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride))
                in_channels = unit_channels
            self.features.add_module("stage{}".format(stage_idx + 1), stage)
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))

        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)

        self._init_params()

    def _init_params(self):
        # Kaiming-uniform initialization for every convolution; biases to zero.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)  # flatten before the classifier
        return self.output(x)
def get_resnet(blocks,
               bottleneck=None,
               conv1_stride=True,
               width_scale=1.0,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".torch", "models"),
               **kwargs):
    """
    Create ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units. When None it is
        inferred from the depth (bottleneck for 50 layers and deeper).
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        # Conventionally only ResNet-50 and deeper use bottleneck units.
        bottleneck = (blocks >= 50)

    # Per-stage unit counts. Depths 14 and 26 have distinct plain/bottleneck
    # layouts, so they are handled separately from this table.
    depth_table = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif blocks in depth_table:
        layers = depth_table[blocks]
    else:
        raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks))

    # Sanity check: unit counts must reproduce the requested depth
    # (3 convs per bottleneck unit, 2 per plain unit, plus stem and classifier).
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(layers) * convs_per_unit + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [bottleneck_factor * c for c in channels_per_layers]

    channels = [[c] * depth for (c, depth) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit width except the very last one of the last stage,
        # which keeps its nominal size.
        last_stage = len(channels) - 1
        channels = [
            [cij if (i == last_stage) and (j == len(stage) - 1) else int(cij * width_scale)
             for j, cij in enumerate(stage)]
            for i, stage in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = ResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def resnet10(**kwargs):
    """
    ResNet-10 model (experimental) from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet10", blocks=10, **kwargs)
def resnet12(**kwargs):
    """
    ResNet-12 model (experimental) from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet12", blocks=12, **kwargs)
def resnet14(**kwargs):
    """
    ResNet-14 model (experimental, plain blocks) from 'Deep Residual Learning
    for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet14", blocks=14, **kwargs)
def resnetbc14b(**kwargs):
    """
    ResNet-BC-14b model (experimental, bottleneck-compressed, stride on the
    3x3 conv) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnetbc14b", blocks=14, bottleneck=True, conv1_stride=False, **kwargs)
def resnet16(**kwargs):
    """
    ResNet-16 model (experimental) from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet16", blocks=16, **kwargs)
def resnet18_wd4(**kwargs):
    """
    ResNet-18 model with 0.25 width scale (experimental) from 'Deep Residual
    Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet18_wd4", blocks=18, width_scale=0.25, **kwargs)
def resnet18_wd2(**kwargs):
    """
    ResNet-18 model with 0.5 width scale (experimental) from 'Deep Residual
    Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet18_wd2", blocks=18, width_scale=0.5, **kwargs)
def resnet18_w3d4(**kwargs):
    """
    ResNet-18 model with 0.75 width scale (experimental) from 'Deep Residual
    Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet18_w3d4", blocks=18, width_scale=0.75, **kwargs)
def resnet18(**kwargs):
    """
    ResNet-18 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet18", blocks=18, **kwargs)
def resnet26(**kwargs):
    """
    ResNet-26 model (experimental, plain blocks) from 'Deep Residual Learning
    for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet26", blocks=26, bottleneck=False, **kwargs)
def resnetbc26b(**kwargs):
    """
    ResNet-BC-26b model (experimental, bottleneck-compressed, stride on the
    3x3 conv) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnetbc26b", blocks=26, bottleneck=True, conv1_stride=False, **kwargs)
def resnet34(**kwargs):
    """
    Build the ResNet-34 classifier from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet34", blocks=34, **kwargs)
def resnet50(**kwargs):
    """
    Build the ResNet-50 classifier from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet50", blocks=50, **kwargs)
def resnet50b(**kwargs):
    """
    ResNet-50 "b" variant (downsampling stride on the second bottleneck conv
    instead of the first) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet50b", conv1_stride=False, blocks=50, **kwargs)
def resnet101(**kwargs):
    """
    Build the ResNet-101 classifier from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet101", blocks=101, **kwargs)
def resnet101b(**kwargs):
    """
    ResNet-101 "b" variant (downsampling stride on the second bottleneck conv
    instead of the first) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet101b", conv1_stride=False, blocks=101, **kwargs)
def resnet152(**kwargs):
    """
    Build the ResNet-152 classifier from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet152", blocks=152, **kwargs)
def resnet152b(**kwargs):
    """
    ResNet-152 "b" variant (downsampling stride on the second bottleneck conv
    instead of the first) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet152b", conv1_stride=False, blocks=152, **kwargs)
def resnet200(**kwargs):
    """
    Experimental ResNet-200 from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet200", blocks=200, **kwargs)
def resnet200b(**kwargs):
    """
    Experimental ResNet-200 "b" variant (downsampling stride on the second
    bottleneck conv) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="resnet200b", conv1_stride=False, blocks=200, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every factory: parameter count, forward and backward pass."""
    import torch
    from torch.autograd import Variable

    pretrained = False

    # Expected trainable-parameter counts, keyed by factory function.
    expected_weights = {
        resnet10: 5418792,
        resnet12: 5492776,
        resnet14: 5788200,
        resnetbc14b: 10064936,
        resnet16: 6968872,
        resnet18_wd4: 3937400,
        resnet18_wd2: 5804296,
        resnet18_w3d4: 8476056,
        resnet18: 11689512,
        resnet26: 17960232,
        resnetbc26b: 15995176,
        resnet34: 21797672,
        resnet50: 25557032,
        resnet50b: 25557032,
        resnet101: 44549160,
        resnet101b: 44549160,
        resnet152: 60192808,
        resnet152b: 60192808,
        resnet200: 64673832,
        resnet200b: 64673832,
    }

    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected

        # One dummy forward/backward pass over an ImageNet-sized input.
        x = Variable(torch.randn(1, 3, 224, 224))
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)


if __name__ == "__main__":
    _test()
| [
"[email protected]"
] | |
6830a86942fe8380c4063347288e1d54efb6ba29 | 3d40e2e449e545fc5fb4fbb9deffb7c19f74a29a | /tracks.py | 0b25c77980dddb152856656fa1017e413284d055 | [] | no_license | gaberosser/bacterial_motility | 108578ac50924cbcc66607c8d8c1c4a8d14ece89 | aecfc9694b5b5017c6b2e980c670dfaa6b3efd28 | refs/heads/master | 2020-03-31T02:27:19.802462 | 2015-02-07T15:21:06 | 2015-02-07T15:21:06 | 30,266,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | __author__ = 'gabriel'
import numpy as np
import csv
class Track(object):
    """A single 2-D track: x coordinates in row 0, y coordinates in row 1."""

    def __init__(self, data):
        """
        :param data: 2 x N array containing track coordinates
        :return:
        """
        self.data = np.array(data)
        # Require a true 2 x N matrix. Previously only shape[0] == 2 was
        # checked, so a flat 2-element list slipped through and __len__
        # then crashed with an IndexError.
        assert self.data.ndim == 2 and self.data.shape[0] == 2, \
            "Data must be in the form of a 2 x N matrix"

    def __len__(self):
        # Number of coordinate pairs in the track.
        return self.data.shape[1]


class TrackCollection(object):
    @staticmethod
    def from_txt(infile):
        """Build a collection from a CSV file where each track occupies two
        consecutive rows: x coordinates first, y coordinates second."""
        with open(infile, 'r') as f:
            reader = csv.reader(f)
            tracks = []
            for row in reader:
                this_track = [
                    [float(col) for col in row],
                    # Python 3 fix: csv readers no longer expose .next();
                    # use the builtin next() (also valid on Python 2.6+).
                    [float(col) for col in next(reader)]
                ]
                tracks.append(Track(this_track))
        return TrackCollection(tracks)

    def __init__(self, tracks):
        """
        :param tracks: Iterable containing track objects
        :return:
        """
        self.tracks = tracks
        for t in tracks:
            assert isinstance(t, Track), "Must supply an iterable of Track instances"

    def __len__(self):
        return len(self.tracks)
"[email protected]"
] | |
cbf843f414248919eaa922e8a9c9354837d8315f | 7e9b2788231e6351fb82c2201c74c7228753d327 | /ejercicio4.py | 6ff8f26dfa83e8dcbdb23f030997c76855b43555 | [] | no_license | alexisalvo/Curso_Python | c5d1d1525da1a5be0007047d64a35cca24631dcc | 2aa0a39574f6c416bd252cd91523d623bfc14c55 | refs/heads/master | 2020-03-18T16:37:26.383751 | 2018-06-23T14:34:47 | 2018-06-23T14:34:47 | 134,976,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | tiempo_llamada = int(input("duracion llamada:"))
# Flat-rate phone-call pricing: calls of up to 10 minutes cost 10 pesos.
if tiempo_llamada <= 10:
    print ("costo de la llamada: 10 pesos")
else:
    # NOTE(review): the duration entered by the user is discarded here --
    # this always computes 10 * 1.05 = 10.5 regardless of call length.
    # Presumably the intent was to surcharge the minutes beyond 10;
    # confirm the intended formula before changing it.
    tiempo_llamada = 10 * 1.05
    print ("costo de la llamada es:", tiempo_llamada)
| [
"[email protected]"
] | |
1a00c1dd8988545907b2a26f2331a9be2ba0e08f | ba2c3c2f3fbb80c4360107f6ec2040dcf854daf4 | /blog/views.py | 6938b80cae6c45a5ed0e0d3ccb68ba1750bd0115 | [] | no_license | cameron-yee/djangoblog | 3991b5f1839bd29806e09f1a087855b1ca07fc60 | 4caec6fdfa9e8bc3296ea4f0aae0fcc884da9a24 | refs/heads/master | 2021-01-21T11:34:28.179156 | 2017-09-28T20:34:12 | 2017-09-28T20:34:12 | 102,015,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | from django.shortcuts import render
from django.utils import timezone
from .models import Post
from django.shortcuts import render, get_object_or_404
def post_list(request):
    """Render the list of posts whose publication date is not in the future."""
    published = Post.objects.filter(published_date__lte=timezone.now())
    posts = published.order_by('-published_date')
    return render(request, 'blog/post_list.html', {'posts': posts})
def post_detail(request, pk):
    """Render a single post, answering HTTP 404 when *pk* does not exist."""
    post = get_object_or_404(Post, pk=pk)
    context = {'post': post}
    return render(request, 'blog/post_detail.html', context)
| [
"[email protected]"
] | |
205688087db8414396f56222bef9bea3968f4475 | 6c5d2dd3805ceba64c8e26a7c1ffe05a1eb68376 | /pset6/dna/dna.py | 0010add5ca39e84ff631197168cda6826be6dbb8 | [] | no_license | StephPeten/CS50 | ed698125ac91650d69eafbca5cc70af88c0f617d | 363211ea11494f7c4f9cf9f8fd96c4b123ae14e1 | refs/heads/master | 2022-11-19T13:45:48.784891 | 2020-07-13T16:30:33 | 2020-07-13T16:30:33 | 275,923,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | from csv import reader, DictReader
import csv
from sys import argv, exit
# CS50-style DNA profiling: measure the longest run of each STR (short
# tandem repeat) in a DNA sequence and match the counts against a CSV
# database of people.
if len(argv) != 3:
    print("Format : python dna.py data.csv sequence.txt")
    exit(1)
# Read the STR names from the CSV header row (dropping the leading 'name'
# column), then stop reading.
with open(argv[1], newline='') as csvfile:
    datareader = csv.reader(csvfile)
    for row in datareader:
        azote = row
        azote.pop(0)
        break
# NOTE(review): counts are seeded at 1 and the longest run found below is
# *added* to it. Presumably the +1 offsets the inner while loop not
# counting the first occurrence of a run; confirm against the expected
# CSV totals before changing.
azotes = {}
for item in azote:
    azotes[item] = 1
# The sequence file holds a single line of DNA.
with open(argv[2]) as txtfile:
    seqreader = reader(txtfile)
    for line in seqreader:
        seq = line
sequence = seq[0]
for key in azotes:
    k = len(key)
    count = 0
    countMAX = 0
    for i in range(len(sequence)):
        # NOTE(review): this drains `count` back to zero one step at a
        # time; it is equivalent to `count = 0` and looks like leftover
        # debugging scaffolding.
        while count > 0:
            count -= 1
        if sequence[i: i + k] == key:
            # Walk forward while the previous k-mer equals the current
            # one, i.e. while the repeat run continues.
            while sequence[i - k: i] == sequence[i: i + k]:
                count += 1
                i += k
            if count > countMAX:
                countMAX = count
    azotes[key] += countMAX
# Compare the computed counts against every person's row and print the
# first exact match.
with open(argv[1], newline='') as csvfile:
    datareader = DictReader(csvfile)
    for moldu in datareader:
        match = 0
        # NOTE(review): this loop variable shadows the `sequence` DNA
        # string above; harmless only because nothing reads it afterwards.
        for sequence in azotes:
            if azotes[sequence] == int(moldu[sequence]):
                match += 1
        if match == len(azotes):
            print(moldu['name'])
            exit()
print("No match") | [
"[email protected]"
] | |
76c1c12c79264ef1defc9726e8e173ef03eb26e1 | 2c46d86b17ec453923aba0499f82da5c9ba1fee0 | /dashboard/migrations/0011_oopsy_points_remaining.py | be5856d15e6b246effd4d12f0243b6e8b57adaa4 | [
"MIT"
] | permissive | Tomasz-Kluczkowski/Bnice | 39d83bec0a91d410f900e641104cd90f20d05b82 | 75eb66a94a3bf3225691ed6802e674fbcf108571 | refs/heads/master | 2022-12-11T12:11:00.517685 | 2018-12-27T12:30:22 | 2018-12-27T12:30:22 | 127,747,595 | 0 | 0 | MIT | 2021-09-07T23:51:43 | 2018-04-02T11:31:57 | Python | UTF-8 | Python | false | false | 407 | py | # Generated by Django 2.0.3 on 2018-05-01 20:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the non-negative
    ``points_remaining`` counter (default 0) to the ``Oopsy`` model.

    Schema-history file -- do not edit by hand; create a new migration
    instead.
    """

    dependencies = [
        ('dashboard', '0010_auto_20180501_2148'),
    ]

    operations = [
        migrations.AddField(
            model_name='oopsy',
            name='points_remaining',
            field=models.PositiveSmallIntegerField(default=0),
        ),
    ]
| [
"[email protected]"
] | |
004463a30f6f1298467745a2de4268b02ca89081 | 847873a2d291042060f895777c780ba76a1318ac | /scraping.py | 6f4ff1cfd1757b0866e2b5781028f3c27e3a3916 | [] | no_license | bgerrard5392/Mission-to-Mars-1 | 349863a03c753bd22100d31d51698d4829e6f36d | 99d82fa485369bcaf34c895dca7d02cc51969590 | refs/heads/main | 2023-03-05T08:32:02.010210 | 2021-02-20T22:35:45 | 2021-02-20T22:35:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,000 | py | # Import Splinter and BeautifulSoup
from splinter import Browser
from bs4 import BeautifulSoup as soup
import pandas as pd
import datetime as dt
def scrape_all():
    """Run every scraper and bundle the results into a single dict."""
    # Launch a headless Chrome instance managed by webdriver-manager.
    from webdriver_manager.chrome import ChromeDriverManager
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser("chrome", **executable_path, headless=True)

    title, paragraph = mars_news(browser)

    # Collect every scraped piece under its dashboard key, in the same
    # order the scrapers originally ran.
    data = {}
    data["news_title"] = title
    data["news_paragraph"] = paragraph
    data["featured_image"] = featured_image(browser)
    data["facts"] = mars_facts()
    data["hemispheres"] = mars_hemis(browser)
    data["last_modified"] = dt.datetime.now()

    # Shut the webdriver down before handing the results back.
    browser.quit()
    return data
def mars_news(browser):
    """Scrape the latest headline and teaser from the NASA Mars news page.

    Returns (title, paragraph), or (None, None) when the expected page
    elements are missing.
    """
    browser.visit('https://mars.nasa.gov/news/')
    # Give the page up to a second to render the article list.
    browser.is_element_present_by_css("ul.item_list li.slide", wait_time=1)

    page = soup(browser.html, 'html.parser')

    try:
        first_slide = page.select_one("ul.item_list li.slide")
        headline = first_slide.find("div", class_="content_title").get_text()
        teaser = first_slide.find("div", class_="article_teaser_body").get_text()
    except AttributeError:
        # select_one/find returned None somewhere along the chain.
        return None, None

    return headline, teaser
def featured_image(browser):
    """Return the absolute URL of the featured JPL image, or None on failure."""
    browser.visit('https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html')

    # The second <button> on the page opens the full-size image view.
    buttons = browser.find_by_tag('button')
    buttons[1].click()

    page = soup(browser.html, 'html.parser')

    try:
        relative_url = page.find('img', class_='fancybox-image').get('src')
    except AttributeError:
        # The fancybox image was not present; report "no image".
        return None

    # Resolve the relative path against the site base.
    return f'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/{relative_url}'
def mars_facts():
    """Scrape the Mars facts table and return it as Bootstrap-styled HTML.

    Returns None when the table cannot be fetched or parsed, so a single
    network hiccup no longer kills the whole scrape.
    """
    # The old comment promised try/except error handling but the guard was
    # missing; read_html raises on any network/parse failure.
    try:
        df = pd.read_html('http://space-facts.com/mars/')[0]
    except BaseException:
        return None

    # Assign columns and set index of dataframe
    df.columns = ['Description', 'Mars']
    df.set_index('Description', inplace=True)

    # Convert dataframe into HTML format, add bootstrap classes.
    # Fix: the original string had a stray comma between CSS classes
    # ('table-striped,'), which is not a valid class name.
    return df.to_html(classes='table table-striped table-bordered')
def mars_hemis(browser):
    """Scrape title/image-url dicts for all four Mars hemispheres."""
    browser.visit(
        "https://astrogeology.usgs.gov/search/"
        "results?q=hemisphere+enhanced&k1=target&v1=Mars"
    )

    results = []
    for position in range(4):
        # Open the Nth hemisphere page, scrape it, then navigate back so
        # the next product link is clickable again.
        browser.find_by_css("a.product-item h3")[position].click()
        results.append(scrape_hemisphere(browser.html))
        browser.back()
    return results
def scrape_hemisphere(html_text):
    """Pull the title and full-resolution sample link out of one hemisphere page."""
    # parse html text
    parsed = soup(html_text, "html.parser")

    title = None
    sample = None
    try:
        title = parsed.find("h2", class_="title").get_text()
        sample = parsed.find("a", text="Sample").get("href")
    except AttributeError:
        # Image error will return None, for better front-end handling
        title = None
        sample = None

    return {
        "title": title,
        "img_url": sample,
    }


if __name__ == "__main__":
    # If running as script, print scraped data
    print(scrape_all())
| [
"[email protected]"
] | |
3c8b945e4693315669386c7d5f50e30059747dfa | 89145800ada60f8d2d1b3200b6f384c1a4f8fff8 | /aparcamientos/migrations/0027_auto_20170516_0141.py | 3163e2acafbeaf03e6e8aae738f4821dca33d9f6 | [] | no_license | malozanom/X-Serv-Practica-Aparcamientos | 2f8f2cab9b9ca096ab3209d8fa6579aacbdce593 | d6da3af090aef7e8b0d23add7a5ff76f979d0311 | refs/heads/master | 2021-06-24T17:34:40.930085 | 2019-11-04T18:36:38 | 2019-11-04T18:36:38 | 90,887,116 | 0 | 0 | null | 2017-05-10T16:46:30 | 2017-05-10T16:46:30 | null | UTF-8 | Python | false | false | 440 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: relaxes ``Preferencia.colorFondo``
    to an optional CharField (max_length=50, null/blank allowed).

    Schema-history file -- do not edit by hand; create a new migration
    instead.
    """

    dependencies = [
        ('aparcamientos', '0026_auto_20170516_0139'),
    ]

    operations = [
        migrations.AlterField(
            model_name='preferencia',
            name='colorFondo',
            field=models.CharField(max_length=50, null=True, blank=True),
        ),
    ]
| [
"[email protected]"
] | |
f7bb34e3bb0a8ef728c010baf2173547023c3a37 | 874ae881349dbad7fb74cf8be71bc9a6e321e9a8 | /week1/day4/exercise4.py | 638b09606bdbf90f25b984357140d9fc4b001ae3 | [] | no_license | bluepostit/di-python-2018 | 3e31bc9373797350b695450e4934507647db56b1 | 4bf55cab1d1f96756cbcfc8b3caa2ce8818a4a5b | refs/heads/master | 2020-03-31T16:18:59.498011 | 2018-10-26T14:15:49 | 2018-10-26T14:15:49 | 152,370,126 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | # We'll use this function later to get a list of integers
# as a comma-separated string.
# Remember: str.join() function only works on a list of strings.
def get_list_as_string(lst):
    """Render *lst* as a comma-separated string (each element str()-ified)."""
    return ", ".join(map(str, lst))
# 1 - 3. Collect ten whole numbers from the user.
user_numbers = []
for i in range(10):
    user_number = input("Please enter a whole number between -100 and 100: ")
    # NOTE(review): int() raises ValueError on non-numeric input and the
    # -100..100 range is never enforced; acceptable for an exercise.
    user_numbers.append(int(user_number))
# 4. Visual separator before the summary output.
print('~' * 40)
# 6.1
print("Your numbers: " + get_list_as_string(user_numbers))
# 6.2
print(f"The sum of all the numbers: {sum(user_numbers)}")
# 6.3
first_and_last = [user_numbers[0], user_numbers[-1]]
print("First and last: " + get_list_as_string(first_and_last))
# 6.4  set() removes duplicates; sorted() turns it back into a list.
without_duplicates = sorted(set(user_numbers))
print("Without duplicates: " + get_list_as_string(without_duplicates))
# 6.5
gt50 = [num for num in user_numbers if num > 50]
print("Numbers greater than 50: " + get_list_as_string(gt50))
# 6.6
st10 = [num for num in user_numbers if num < 10]
print("Numbers smaller than 10: " + get_list_as_string(st10))
# 6.7
squares = [num*num for num in user_numbers]
print("Numbers squared: " + get_list_as_string(squares))
# 6.8
# ...
# 6.9
# There are a few ways we can do this. Here is one (max() would also work):
largest = sorted(user_numbers)[-1]
print("The largest number is: {}".format(largest))
# 6.10
# Here is another way:
smallest = min(user_numbers)
print("The smallest number is: {}".format(smallest))
| [
"[email protected]"
] | |
9a4bd441239bf2bb2298a06eb2730845ceecff71 | f99663c4d226da648c1341f0f5d8e422228da105 | /Problems/Arrays&Strings/3Sum.py | 915becf12d3d7e94eb90cfb463550274434ec905 | [] | no_license | PrithviRajMamidala/Leetcode_Solutions | 6d3021f32f186956cd8370f0b68ba815757daeb9 | 6a43413dbdeccd0fafdcecb9945b9ddb550741bf | refs/heads/master | 2022-11-20T06:43:03.010043 | 2020-07-10T21:02:03 | 2020-07-10T21:02:03 | 272,126,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | def threeSum(nums):
    # Sort so duplicate values are adjacent and the two-pointer sweep works.
    nums.sort()
    value = 0  # target sum for the triplets
    Res = []
    for i in range(len(nums)-2):
        # Skip anchors equal to the previous one to avoid duplicate triplets.
        if i > 0 and nums[i] == nums[i-1]:
            continue
        target = nums[i]
        L = i + 1
        R = len(nums)-1
        # Classic two-pointer sweep over the subarray right of the anchor:
        # O(n) per anchor, O(n^2) overall.
        while L < R:
            Total_sum = target+nums[L]+nums[R]
            if Total_sum < value:
                L += 1
            elif Total_sum > value:
                R -= 1
            elif Total_sum == value:
                Res.append([target, nums[L], nums[R]])
                # Step both pointers past runs of equal values so each
                # triplet is reported only once.
                while L < R and nums[L] == nums[L+1]:
                    L += 1
                while L < R and nums[R] == nums[R-1]:
                    R -= 1
                L += 1
                R -= 1
    return Res


if __name__ == '__main__':
    print(threeSum([-1, 0, 1, 2, -1, -4]))
    print(threeSum([0,0,0,0]))
| [
"[email protected]"
] | |
129b26bfccdea76577f53db996b83c58eab8ec7c | f78dcab43a7664ea0f0dde462848f6b0b32f5baf | /CodeProjects/code/ex5/ex5-env/bin/pycodestyle | d09b64f625bd41fa4f95e7211dd2d7491f4ff11e | [] | no_license | foster99/python-job | 420600bb0a38c491712a05dcf73e44dd1f7279b6 | 44c27da826074e70f28db485c4817f819f61aa9e | refs/heads/main | 2023-08-15T23:36:41.375037 | 2021-10-21T21:04:51 | 2021-10-21T21:04:51 | 417,148,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | #!/home/edgar/Escritorio/python-job/CodeProjects/code/ex5/ex5-env/bin/python
# -*- coding: utf-8 -*-
# Setuptools-generated console entry point for the ``pycodestyle`` checker.
import re
import sys
from pycodestyle import _main
if __name__ == '__main__':
    # Normalise argv[0]: strip the "-script.pyw"/".exe" suffix that Windows
    # launcher shims append, so pycodestyle reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(_main())
| [
"[email protected]"
] | ||
8820d2dfe7699e0cd9977aa0318e0158671234e0 | fd3236d271d1a9a3c47726cbcba890c1a02770c1 | /Assignment-the-third/demultiplex.py | 3ebbc05d8ba80b2c8ada26dcc6d5c84b5068411c | [] | no_license | bgpalmer/Demultiplex | ff6d44a1a1cdd6ac481f540622566a4e6bc6f093 | 1d1add3a506f691b8835e4905301fb2666d1fbc7 | refs/heads/master | 2023-07-11T06:06:58.770776 | 2021-08-14T03:03:43 | 2021-08-14T03:03:43 | 389,775,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,676 | py | #!/usr/bin/env python3
import bioinfo as bio
import argparse
# import pathlib
# from collections import Counter
if __name__ == "__main__":
    # Demultiplex dual-indexed paired-end FASTQ reads: matching index pairs
    # go to per-index files, index-hopped pairs to 'Unmatched', and
    # low-quality/unrecognised indexes to 'Unknown'.
    parser = argparse.ArgumentParser()
    parser.add_argument("-idx", "--index_file", type=str, help="indexes")
    parser.add_argument("-i1", "--index_1", type=str, help="index 1")
    parser.add_argument("-r1", "--read_1", type=str, help="read 1")
    parser.add_argument("-i2", "--index_2", type=str, help="index 2")
    parser.add_argument("-r2", "--read_2", type=str, help="read 2")
    parser.add_argument("-qc", "--qscore_cutoff", type=str, help="read average quality score cutoff")
    parser.add_argument("-o", "--output_dir", type=str, help="Output Directory")
    args = parser.parse_args()

    # indexes
    INDEXES = args.index_file
    # output directory
    OUTPUT_DIR = args.output_dir
    # NOTE: the directory should already be made
    # if the output directory does not exits, make it
    # if not os.path.isdir(OUTPUT_DIR):
    #     os.path.mkdir(OUTPUT_DIR)

    # Load indexes into map
    # Key: Sequence (eg. AAAAAAAA)
    # Value: Index Name (eg. F12)
    # The index file is tab-separated with a header row (skipped via ln > 1);
    # column 3 holds the index name and column 4 the sequence.
    index_map: dict = dict()
    with open(INDEXES, 'r') as f:
        ln = 0
        for line in f:
            ln += 1
            if (ln > 1):
                line = line.strip()
                cols = line.split('\t')
                index_map[cols[4]] = cols[3]

    read_index_match_map: dict = {}
    read_nonmatch_bucket_map: dict = {}
    read_counter: dict = {}
    reads = ['R1', 'R4']
    nonmatch_categories = ['Unmatched', 'Unknown']

    # Open file handles for:
    # - Read-Pair Matches
    # - Index hopping ('Unmatched')
    # - Unrecognized indexes, N's in indexes ('Unknown')
    # Total file counts = 52
    #
    # Additionally, initialize counters for all file writes above
    # (counters are shared between R1 and R4, so the second pass of this
    # loop merely re-zeroes them before any records are processed).
    for read in reads:
        read_index_match_map[read] = {}
        read_nonmatch_bucket_map[read] = {}
        for index_seq, index_name in index_map.items():
            read_index_match_map[read][index_seq] = open(f"{OUTPUT_DIR}/{read}_{index_name}.fq", 'w')
            read_counter[index_seq] = 0
        for category in nonmatch_categories:
            read_nonmatch_bucket_map[read][category] = open(f"{OUTPUT_DIR}/{read}_{category}.fq", 'w')
            read_counter[category] = 0

    INDEX1_FILE = args.index_1
    INDEX2_FILE = args.index_2
    READ1_FILE = args.read_1
    READ2_FILE = args.read_2
    QUALITY_SCORE_AVG = float(args.qscore_cutoff)

    # Read one record from Read 1-2 and Index 1-2 at a time.
    # NOTE(review): assumes bio.get_fastq_records yields mutable
    # [header, sequence, plus, quality] records -- confirm in the
    # bioinfo module.
    for index1_record, index2_record, read1_record, read2_record in zip(bio.get_fastq_records(INDEX1_FILE), bio.get_fastq_records(INDEX2_FILE), bio.get_fastq_records(READ1_FILE), bio.get_fastq_records(READ2_FILE)):

        # Read2 and Index2 are reverse complemented
        index2_record[1] = bio.reverse_complement(index2_record[1])
        read2_record[1] = bio.reverse_complement(read2_record[1])

        # Update read headers with indexes (this assumes files are ordered)
        # Indexes formatting should be 'Index1-Index2 (reverse complemented)''
        read1_record[0] += f" {index1_record[1]}-{index2_record[1]}"
        read2_record[0] += f" {index1_record[1]}-{index2_record[1]}"

        # check the quality of the indexes
        index1_qs: bool = bio.quality_score(index1_record[3]) >= QUALITY_SCORE_AVG
        index2_qs: bool = bio.quality_score(index2_record[3]) >= QUALITY_SCORE_AVG

        '''
        This is where file-writing occurs.
        Additionally, each write per read-pair index
        and nonmatch/unknown categories will be counted.
        Under no circumstance should a read be skipped or not written to a file!!!
        '''
        # Read-Pairs must have known indexes and indexes with good quality scores
        if index2_record[1] in index_map and index1_record[1] in index_map and index1_qs and index2_qs:
            if index2_record[1] == index1_record[1]:
                read_index_match_map["R1"][index1_record[1]].write('\n'.join(line for line in read1_record) + '\n')
                read_index_match_map["R4"][index2_record[1]].write('\n'.join(line for line in read2_record) + '\n')
                read_counter[index1_record[1]] += 1
            else:
                # If the indexes do not match, write to respective index-hopping files
                read_nonmatch_bucket_map["R1"]["Unmatched"].write('\n'.join(line for line in read1_record) + '\n')
                read_nonmatch_bucket_map["R4"]["Unmatched"].write('\n'.join(line for line in read2_record) + '\n')
                read_counter["Unmatched"] += 1
        else:
            # We did not meet the critera above; update the unknown file
            # Improvement: do not need the if statements...
            # (the per-end checks mean only the offending end is written,
            # but the pair is still counted exactly once below)
            if index1_record[1] not in index_map or not index1_qs:
                read_nonmatch_bucket_map["R1"]["Unknown"].write('\n'.join(line for line in read1_record) + '\n')
            if index2_record[1] not in index_map or not index2_qs:
                read_nonmatch_bucket_map["R4"]["Unknown"].write('\n'.join(line for line in read2_record) + '\n')
            read_counter["Unknown"] += 1

    # Close the file handles
    for read in reads:
        for index in index_map.keys():
            read_index_match_map[read][index].close()
        for category in nonmatch_categories:
            read_nonmatch_bucket_map[read][category].close()

    '''
    Output two tables of stats to stdout deliminated with tabs.

    Table 1: Read-Pair Match Data
    Headers:
    - Index: Index Name (eg. F12)
    - Count: Number of index-pair write
    - Percentage: Percentage of total index-pair writes

    Table 2: Reads seen that do not fit Table 1
    Headers:
    - Category: Categories (eg. examples above)
    - Count: Number of values for category
    - Percentage: Percentage of value for ALL reads seens
    '''
    print("Index\tCount\tPercentage")
    total: int = sum(read_counter.values())
    read_count: int = total - read_counter["Unknown"] - read_counter["Unmatched"]
    for index_seq, index_name in index_map.items():
        print(f"{index_name}\t{read_counter[index_seq]}\t{(read_counter[index_seq]/read_count) * 100:.2f}")
    print()
    print("Category\tCount\tPercentage")
    for nonmatch in nonmatch_categories:
        print(f"Read-Pair {nonmatch}\t{read_counter[nonmatch]}\t{(read_counter[nonmatch]/total) * 100:.2f}")
    print(f"Read-Pair Matches\t{read_count}\t{(read_count / total) * 100:.2f}")
    print(f"Total Reads\t{total}\t{(total / total) * 100:.2f}")
| [
"[email protected]"
] | |
b9d34811544914551d0bbd43765d8d7b187a9b20 | 57168e1194f99c289115706c90a7840ec36fd924 | /rb2py/string.py | 5c8d4fb3656eed7dd19ae4caeb8f6363037bc69b | [
"Zlib"
] | permissive | anilktechie/rb2py | a9ce1def6c3fb69314ed1621465c4a4e474f6a7c | c4407a3b6c8285279c8f0805677a484f8ca330d6 | refs/heads/master | 2021-06-05T17:53:02.768552 | 2016-10-19T12:08:31 | 2016-10-19T12:08:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,856 | py | # Custom mutable string class, emulating some of Ruby's method
# Many comments are copied from Ruby's documentation
__all__ = ['String']
import hashlib
import re
from collections import namedtuple
from copy import copy
from functools import total_ordering
import rb2py.pack
from rb2py.error import *
from rb2py.pathname import Pathname
from rb2py.string_succ import succ
from rb2py.symbolcls import Symbol
# Converting to string
# Lookup table mapping supported value types to plain-str converters;
# scanned in declaration order by convert_to_str() below.
StrConvert = namedtuple('StrConvert', 'type convert')
CONVERTERS_TO_STR = (
    StrConvert(Symbol, lambda s: str(s)),
    StrConvert(int, lambda s: str(s)),
    StrConvert(float, lambda s: str(s)),
    StrConvert(Pathname, lambda s: str(s))
)
def convert_to_str(other):
    """Coerce *other* to a plain str via the first matching converter.

    Raises Rb2PyValueError when no registered type matches.
    """
    for entry in CONVERTERS_TO_STR:
        if isinstance(other, entry.type):
            return entry.convert(other)
    raise Rb2PyValueError('Expected str, but {!r} found'.format(type(other)))
# We can use "bytes" as a name
# (alias the builtin so parameters named ``bytes`` below don't shadow it)
Bytes = bytes
@total_ordering
class String:
    def __init__(self, other=None, *, encoded_str=None, encoding='latin1', bytes=None, hash=None):
        """Build a mutable Ruby-like string.

        The instance keeps two lazily synchronised representations:
        ``_encoded_str`` (a Python str) and ``_bytes`` (a list of ints),
        plus a cached ``_hash``; the missing representation is derived on
        demand by the _ensure_* helpers.
        """
        self._encoded_str = encoded_str
        self._encoding = encoding
        self._bytes = bytes
        self._hash = hash
        if other is not None:
            if isinstance(other, String):
                # Copy constructor: duplicate both cached representations.
                self._encoded_str = copy(other._encoded_str)
                self._encoding = other._encoding
                # Ruby's binary encoding name maps onto Python's latin1.
                if self._encoding == "ASCII-8BIT":
                    self._encoding = "latin1"
                self._bytes = copy(other._bytes)
                self._hash = other._hash
            elif isinstance(other, Bytes):
                self._bytes = list(other)
            elif isinstance(other, str):
                self._encoded_str = other
            else:
                # Symbols, ints, floats, Pathnames -- see CONVERTERS_TO_STR.
                self._encoded_str = convert_to_str(other)
        elif encoded_str is None and bytes is None:
            # Empty string
            self._bytes = []

    @staticmethod
    def from_byte_list(byte_list):
        # Accept any iterable of ints; normalise to a list first.
        if not isinstance(byte_list, list):
            byte_list = list(byte_list)
        string = String(bytes=byte_list)
        return string

    # Represents the bytes valid string in the current encoding?
    @property
    def _valid(self):
        # NOTE(review): _try_to_encode_str() falls back to latin1, which
        # can decode any byte sequence, so this rarely reports False.
        self._try_to_encode_str()
        return self._encoded_str is not None

    def __add__(self, other):
        # Concatenation happens at the byte level; the result keeps the
        # left operand's data and encoding.
        if not isinstance(other, String):
            other = String(other)
        other._ensure_bytes()
        result = String(self, encoding=self._encoding)
        result._modify_bytes()
        result._bytes += other._bytes
        return result
    def __eq__(self, other):
        if isinstance(other, String):
            # String-to-String comparison is done on raw bytes, so strings
            # with different encodings but equal bytes compare equal.
            self._ensure_bytes()
            other._ensure_bytes()
            return self._bytes == other._bytes
        else:
            if self._valid:
                # Compare the decoded text against the foreign value.
                return self._encoded_str == other
            # Invalid string cannot ever equal to anything
            return False

    def __getitem__(self, index):
        # NOTE(review): with an int index the byte branches pass a single
        # int as ``bytes=`` rather than a list -- confirm downstream
        # callers rely on that before changing it.
        if self._encoding == 'latin1':  # binary string
            self._ensure_bytes()
            return String(bytes=self._bytes.__getitem__(index), encoding=self._encoding)
        if self._valid:
            return String(self._encoded_str.__getitem__(index), encoding=self._encoding)
        else:
            return String(bytes=self._bytes.__getitem__(index), encoding=self._encoding)
def set_index(self, *args):
value = args[-1]
if value.encoding() != self.encoding():
value = String(value)
value._set_encoding(self._encoding)
value._ensure_bytes()
self._ensure_bytes()
result = String(bytes=self._bytes, encoding=self._encoding)
result._modify_bytes()
arg_count = len(args)
if arg_count == 2:
index = args[0]
result._bytes[index] = value._bytes
elif arg_count == 3:
start = args[0]
if start < 0:
start += len(result._bytes)
stop = start + args[1]
result._bytes[start:stop] = value._bytes
else:
raise Rb2PyValueError("String.set_index() unsupported argument count: {}".format(arg_count))
return result
    def __hash__(self):
        # Hash of the byte tuple, cached until the next mutation clears it.
        if self._hash is None:
            self._ensure_bytes()
            self._hash = hash(tuple(self._bytes))
        return self._hash

    def __len__(self):
        # Character count when decodable, raw byte count otherwise.
        if self._valid:
            return len(self._encoded_str)
        else:
            return self.byte_len()

    def __lt__(self, other):
        # Ordering is bytewise; @total_ordering derives the remaining
        # comparison operators from this and __eq__.
        self._ensure_bytes()
        other._ensure_bytes()
        return self._bytes < other._bytes

    # printf-style formatting
    def __mod__(self, args):
        self._ensure_encoded_str()
        # %-formatting needs a tuple when several values are supplied.
        if isinstance(args, list):
            args = tuple(args)
        string = self._encoded_str % args
        return String(string, encoding=self._encoding)

    def __mul__(self, count):
        # Ruby's "str" * n repetition; returns a new String.
        self._ensure_encoded_str()
        return String(encoded_str=self._encoded_str*count, encoding=self._encoding)

    def __str__(self):
        self._ensure_encoded_str()
        return self._encoded_str

    def __repr__(self):
        self._ensure_encoded_str()
        return repr(self._encoded_str)
    # To an array
    def to_a(self):
        return [self]

    # To String instance
    def to_s(self):
        return self

    # Ensures valid self._bytes array
    def _ensure_bytes(self):
        # Lazily derive the byte list from the encoded str; on an encode
        # failure, flip between latin1 and UTF-8 once and retry.
        if self._bytes is None:
            if self._encoded_str:
                try:
                    self._bytes = list(self._encoded_str.encode(str(self._encoding)))
                except UnicodeEncodeError:
                    # Try to switch the encoding
                    if self._encoding == 'latin1': self._encoding = 'UTF-8'
                    else: self._encoding = 'latin1'
                    # elif self._encoding == 'UTF-8': self._encoding = 'latin1'
                    # else: raise Rb2PyNotImplementedError("Unknown encoding {}".format(self._encoding))
                    self._bytes = list(self._encoded_str.encode(str(self._encoding)))
            else:
                # Covers both None and the empty string.
                self._bytes = []

    def _ensure_encoded_str(self):
        # _valid triggers _try_to_encode_str(), so when no error is raised
        # the cached _encoded_str is populated afterwards.
        if not self._valid:
            info = bytes(self._bytes).decode('latin1')
            raise Rb2PyRuntimeError('Invalid string {!r}'.format(info))

    def _modify_bytes(self):
        # Call before mutating _bytes: drops the str and hash caches.
        self._ensure_bytes()
        self._encoded_str = None
        self._hash = None

    def _modify_encoded_str(self):
        # Call before mutating _encoded_str: drops the bytes and hash caches.
        self._ensure_encoded_str()
        self._bytes = None
        self._hash = None

    def _try_to_encode_str(self):
        # Despite the name this *decodes* the byte list into the cached
        # str, falling back to latin1 (which accepts any byte sequence).
        if self._encoded_str is None:
            try:
                self._encoded_str = bytes(self._bytes).decode(str(self._encoding))
            except UnicodeDecodeError:
                self._encoded_str = bytes(self._bytes).decode('latin1')
    def append(self, object):
        """Append a String, a str, or an int code point; returns self (Ruby <<)."""
        if isinstance(object, String):
            # If either side is byte-oriented (latin1), concatenate on the
            # byte level; otherwise concatenate the decoded forms.
            if self._encoding == 'latin1' or object._encoding == 'latin1':
                object._ensure_bytes()
                self._modify_bytes()
                self._bytes += object._bytes
            else:
                object._ensure_encoded_str()
                self._modify_encoded_str()
                self._encoded_str += object._encoded_str
        elif isinstance(object, str):
            self._modify_encoded_str()
            self._encoded_str += object
        elif isinstance(object, int):
            self._modify_encoded_str()
            self._encoded_str += chr(object)
        else:
            raise Rb2PyValueError("Unknown type '%s' for string concatenation." % type(object))
        return self
    concat = append
def append_byte(self, byte):
if byte < 0 or byte > 255:
raise Rb2PyValueError("String.append_byte requires integer > 0 and <= 255, value '{}' found".format(byte))
self._modify_bytes()
self._bytes.append(byte)
    def append_bytes(self, bytes):
        """Append an iterable of byte values (each validated by append_byte)."""
        # NOTE: the parameter shadows the builtin ``bytes`` inside this method.
        for byte in bytes:
            self.append_byte(byte)
    def append_char(self, char):
        """Append a single character to the decoded representation."""
        self._modify_encoded_str()
        self._encoded_str += char
    def binary_substring(self, index, length):
        """Byte-level substring: *length* bytes starting at byte *index*."""
        self._ensure_bytes()
        return String(bytes=self._bytes[index:index + length], encoding=self._encoding)
    def byte_len(self):
        """Length in bytes (Ruby bytesize)."""
        self._ensure_bytes()
        return len(self._bytes)
# string.chomp(separator=$/) -> new_string
# Returns a new String with the given record separator removed from the end of str (if present).
# If $/ has not been changed from the default Ruby record separator,
# then chomp also removes carriage return characters (that is it will remove \n, \r, and \r\n).
# If $/ is an empty string, it will remove all trailing newlines from the string.
def chomp(self):
self._ensure_encoded_str()
new_str = self._encoded_str.lstrip("\n\r")
result = String(new_str, encoding=self._encoding)
return result
# string.chop() -> string
# Returns a new String with the last character removed. If the string ends
# with \r\n, both characters are removed. Applying chop to an empty string
# returns an empty string.
    def chop(self):
        """Return a copy with the last character (or a trailing "\\r\\n") removed."""
        result = String(self)
        result.beware_chop()
        return result
def beware_chop(self):
if self.is_empty():
return None
self._modify_encoded_str()
if len(self) > 1 and self._encoded_str.endswith("\r\n"):
self._encoded_str = self._encoded_str[:-2]
self._encoded_str = self._encoded_str[:-1]
return self
    def bytes(self):
        """Return the byte values as a list of ints (Ruby bytes/each_byte)."""
        self._ensure_bytes()
        # NOTE: returns the internal list itself, not a copy.
        return self._bytes
    each_byte = bytes
    def codepoints(self):
        """Return the Unicode code point of each character."""
        self._ensure_encoded_str()
        return [ord(c) for c in self._encoded_str]
    each_codepoint = codepoints
# Instantiate String from text which corresponds to Ruby's rules for double quoted strings,
# which slightly differ from those for Python normal strings.
# If the string contains \xZY code, it is assumed to be binary.
# If the string contains \uABCD code, it is assumed to be UTF-8.
SIMPLE_CHARS = {
'a': '\a', # bell, ASCII 07h (BEL)
'b': '\b', # backspace, ASCII 08h (BS)
'f': '\f', # form feed, ASCII 0Ch (FF)
't': '\t', # horizontal tab, ASCII 09h (TAB)
'n': '\n', # newline (line feed), ASCII 0Ah (LF)
'v': '\v', # vertical tab, ASCII 0Bh (VT)
'r': '\r', # carriage return, ASCII 0Dh (CR)
'\\': '\\',# backslash, \
'"': '"', # double quote "
's': ' ', # space
}
HEXCHARS = tuple('0123456789abcdef')
    @staticmethod
    def double_quoted(characters):
        """Build a String from the body of a Ruby double-quoted literal.

        Handles the simple backslash escapes in SIMPLE_CHARS, \\xN[N] byte
        escapes (which force the result to latin1/binary) and \\uNNNN
        Unicode escapes.  Raises Rb2PyValueError on malformed escapes.
        """
        result = String()
        i = 0
        while i < len(characters):
            char = characters[i].lower()
            i += 1
            if char == '\\':
                if i >= len(characters):
                    raise Rb2PyValueError("Double quoted string cannot end with \\ {!r}".format(characters))
                char = characters[i].lower()
                i += 1
                if char in String.SIMPLE_CHARS:
                    result.append_char(String.SIMPLE_CHARS[char])
                elif char == 'x':
                    if i >= len(characters):
                        raise Rb2PyValueError("Invalid hex escape {!r}".format(characters))
                    char = characters[i].lower()
                    i += 1
                    if not char in String.HEXCHARS:
                        raise Rb2PyValueError("Invalid hex escape {!r}".format(characters))
                    # A byte escape makes the whole string binary (latin1).
                    if result._encoding != 'latin1':
                        result._set_encoding('latin1')
                    first_nibble = String.HEXCHARS.index(char)
                    # The second hex digit is optional in Ruby (\xF == \x0F).
                    if i < len(characters) and characters[i].lower() in String.HEXCHARS:
                        second_nibble = String.HEXCHARS.index(characters[i].lower())
                        i += 1
                        result.append_byte((first_nibble << 4) + second_nibble)
                    else:
                        result.append_byte(first_nibble)
                elif char == 'u':
                    value = 0
                    for _ in range(4):
                        if i >= len(characters):
                            raise Rb2PyValueError("invalid Unicode escape in string {!r}".format(characters))
                        char = characters[i].lower()
                        if not char in String.HEXCHARS:
                            raise Rb2PyValueError("Invalid Unicode escape {!r} in string {!r}".format(char, characters))
                        i += 1
                        value = (value << 4) + String.HEXCHARS.index(char)
                    result.append_char(chr(value))
                else:
                    raise Rb2PyValueError("Unknown escape {!r} in string {!r}".format(char, characters))
            else:
                # Not an escape: append the original, non-lowercased character.
                result.append_char(characters[i-1])
        return result
def is_empty(self):
if self._encoded_str is not None:
return len(self._encoded_str) == 0
if self._bytes is None:
return True
return len(self._bytes) == 0
    def encode(self, encoding):
        """Return a copy converted to *encoding* (the bytes are recomputed)."""
        string = String(self)
        string._ensure_encoded_str()
        string._encoding = encoding
        string._bytes = None
        string._ensure_bytes()
        return string
    def encoded_substring(self, index, length, encoding):
        """Byte substring [index, index+length) reinterpreted in *encoding*."""
        self._ensure_bytes()
        # If it is whole string, we don't need to make substring
        if index == 0 and length == len(self._bytes):
            # The encoding corresponds to ours, so we can buffer the conversion
            if encoding is None or encoding == self._encoding:
                self._ensure_encoded_str()
                # return copy
                return String(self)
            string = self
        else:
            string = self.binary_substring(index, length)
        string = String(bytes=string._bytes, encoding=encoding)
        return string
    def encoding(self):
        """Name of the string's current character encoding."""
        return self._encoding
    def _set_encoding(self, encoding):
        """Re-encode the decoded form under *encoding* (ASCII-8BIT -> latin1)."""
        self._ensure_encoded_str()
        self._bytes = None
        if encoding == "ASCII-8BIT":
            encoding = "latin1"
        self._encoding = encoding
        self._ensure_bytes()
    def first(self):
        """Ruby first on a string coerced to a one-element array: self."""
        return self
    def force_encoding(self, encoding):
        """Relabel the content as *encoding* without converting it.

        Mirrors Ruby's force_encoding: the underlying bytes stay the same,
        only the encoding label changes.  Returns self.
        """
        if encoding == "ASCII-8BIT":
            encoding = "latin1"
        self._encoding = encoding
        if self._bytes is None:
            # NOTE(review): maps each char to its code point; values > 255
            # would not be valid single bytes -- assumes latin1-range content.
            self._bytes = [ord(char) for char in self._encoded_str]
        self._encoded_str = None
        self._hash = None
        return self
# str[regexp, capture] -> new_str or nil
# If a Regexp is supplied, the matching portion of the string is returned.
# If a capture follows the regular expression, which may be a capture group index or name,
# follows the regular expression that component of the MatchData is returned instead.
# Returns nil if the regular expression does not match.
def get_indices_regexp(self, regexp, capture):
captures = []
if self.match(regexp, captures):
return captures[capture]
else:
return None
# Global substitution
# gsub(pattern, replacement) -> new_str
    def gsub(self, pattern, replacement):
        """Global substitution (Ruby gsub): literal replace for String
        patterns, regexp substitution otherwise."""
        self._ensure_encoded_str()
        if isinstance(pattern, String):
            string = self._encoded_str.replace(str(pattern), str(replacement))
        else:
            string = pattern.sub(str(replacement), self._encoded_str)
        return String(encoded_str=string, encoding=self._encoding)
    def hexdigest(self, algorithm_name):
        """Hex digest of the byte content under the given hashlib algorithm."""
        h = hashlib.new(algorithm_name)
        self._ensure_bytes()
        h.update(bytes(self._bytes))
        return h.hexdigest()
def is_ascii_only(self):
self._ensure_encoded_str()
for char in self._encoded_str:
if ord(char) > 127:
return False
return True
    def join(self, array):
        """Concatenate str(item) for every item, separated by self."""
        self._ensure_encoded_str()
        result = self._encoded_str.join(str(item) for item in array)
        return String(result, encoding=self._encoding)
def lower(self):
self._ensure_encoded_str()
result = String(encoding=self._encoding)
for char in self._encoded_str:
if 'A' <= char <= 'Z':
result.append_char(chr(ord(char) - ord('A') + ord('a')))
else:
result.append_char(char)
return result
def upper(self):
self._ensure_encoded_str()
result = String(encoding=self._encoding)
for char in self._encoded_str:
if 'a' <= char <= 'z':
result.append_char(chr(ord(char) - ord('a') + ord('A')))
else:
result.append_char(char)
return result
# =~ operator
# =~ is Ruby's basic pattern-matching operator. When one operand is a regular expression and the other
# is a string then the regular expression is used as a pattern to match against the string.
# If a match is found, the operator returns index of first match in string, otherwise it returns nil.
#
# /hay/ =~ 'haystack' #=> 0
# 'haystack' =~ /hay/ #=> 0
# /a/ =~ 'haystack' #=> 1
# /u/ =~ 'haystack' #=> nil
#
# rb2py_regexp_captures is an array where rb2py_regexp_captures[1], rb2py_regexp_captures[2]... corresponds
# to $1, $2... method-local Ruby variables (group captures)
    def match(self, regexp, regexp_captures=None):
        """Ruby =~: index of the first match, or None; fills *regexp_captures*.

        On a match, regexp_captures[1:] correspond to Ruby's $1, $2, ...
        group captures (index 0 is a placeholder empty String).
        """
        self._ensure_encoded_str()
        if regexp_captures is not None:
            regexp_captures.clear()
        match_object = regexp.search(self._encoded_str)
        if match_object:
            if regexp_captures is not None:
                # $0 in Ruby is script name, not regexp capture
                # We just add an empty String for simplicity
                regexp_captures.append(String(encoding=self._encoding))
                # NOTE(review): a non-participating group yields None here;
                # String(None, ...) behavior is unverified from this chunk.
                regexp_captures.extend(String(group, encoding=self._encoding) for group in match_object.groups())
            return match_object.start()
        else:
            return None
# scan(pattern) -> array
# scan(pattern) {|match, ...| block } -> str
# Both forms iterate through str, matching the pattern (which may be a Regexp or a String).
# For each match, a result is generated and either added to the result array or passed to the block.
# If the pattern contains no groups, each individual result consists of the matched string, $&.
# If the pattern contains groups, each individual result is itself an array containing one entry per group.
    def scan(self, regexp, block=None):
        """Ruby String#scan: collect all matches, grouped per Ruby's rules.

        Without *block* the list of matches is returned; with *block* each
        match is passed to it and self is returned.
        """
        self._ensure_encoded_str()
        matches = regexp.findall(self._encoded_str)
        if len(matches):
            if regexp.groups == 1:
                # In Ruby, group in regexp always generates array, even with single object.
                # In Python only multiple groups generate tuple.
                # E.g.
                #   re.findall('.', 'abc') == ['a', 'b', 'c'] == re.findall('(.)', 'abc')
                # but
                #   'abc'.scan(/./) == ["a", "b", "c"]
                #   'abc'.scan(/(.)/) == [["a"], ["b"], ["c"]]
                matches = [[String(match, encoding=self._encoding)] for match in matches]
            elif regexp.groups > 1:
                # Convert tuples to lists
                matches = [list([String(group, encoding=self._encoding) for group in match]) for match in matches]
            else:
                # Pattern without groups. Just convert it to String instances
                matches = [String(match, encoding=self._encoding) for match in matches]
        if block:
            for match in matches:
                block(match)
            # The block form returns the original string
            return self
        else:
            return matches
def slice(self, object, extra=None):
if isinstance(object, int):
self._ensure_encoded_str()
if extra is not None:
if not isinstance(extra, int):
raise Rb2PyValueError("String.slice(index, length) length is not and int!")
extra += extra + 1
if extra > len(self._encoded_str):
extra = len(self._encoded_str)
else:
extra = len(self._encoded_str)
return self.slice_range(range(object, extra))
if isinstance(object, range):
if extra:
raise Rb2PyValueError("Extra parameter for String.slice(range)")
return self.slice_range(object)
if isinstance(object, re._pattern_type):
if extra:
raise Rb2PyNotImplementedError("String.slice does not implement capture parameter for regexp")
return self.slice_regexp(object)
raise Rb2PyNotImplementedError("Slice for parameter of type '{}'".format(type(object)))
# slice(range) -> new_str or nil
# If passed a range, its beginning and end are interpreted as offsets delimiting the substring to be returned.
# NOT IMPLEMENTED: Returns nil if the initial index falls outside the string or the length is negative.
    def slice_range(self, range):
        """Substring delimited by a range's start/stop/step; returns a new String."""
        # NOTE: the parameter shadows the builtin ``range`` inside this method.
        self._ensure_encoded_str()
        return String(self._encoded_str[range.start:range.stop:range.step], encoding=self._encoding)
# slice(regexp) -> new_str or nil
# If a Regexp is supplied, the matching portion of the string is returned.
    def slice_regexp(self, regexp):
        """Return the matching portion as a new String, or None if no match."""
        self._ensure_encoded_str()
        match = regexp.search(self._encoded_str)
        if match:
            return String(match.group(), encoding=self._encoding)
        return None
    def split(self, regexp=None):
        """Ruby String#split by whitespace (default), literal string, or regexp.

        Trailing empty fields are suppressed, matching Ruby's no-limit form.
        """
        self._ensure_encoded_str()
        s = self._encoded_str
        # Ruby docs: If pattern is omitted, the value of $; is used.
        # If $; is nil (which is the default), str is split on whitespace
        if regexp is None:
            result = s.split()
        # Not really a regexp, just plain string
        elif String.is_string(regexp):
            # Ruby special case: a single-space separator means "split on
            # whitespace runs", not "split on every space".
            if regexp == ' ':
                result = s.split()
            else:
                result = s.split(str(regexp))
        # Fix for Python split() regexp bug. See:
        # https://docs.python.org/3/library/re.html#re.split
        elif regexp.pattern == '':
            result = [char for char in s]
        else:
            # Finally standard regexp split() :)
            result = regexp.split(s)
        # Ruby docs: If the limit parameter is omitted, trailing null fields are suppressed.
        # Ruby C code:
        #   if (NIL_P(limit) && lim == 0) {
        #       long len;
        #       while ((len = RARRAY_LEN(result)) > 0 && (tmp = RARRAY_AREF(result, len-1), RSTRING_LEN(tmp) == 0))
        #           rb_ary_pop(result);
        #   }
        while len(result) > 0 and result[-1] == '':
            result.pop()
        return [String(chunk, encoding=self._encoding) for chunk in result]
    def is_start_with(self, other):
        """Ruby start_with?: True when the string begins with str(other)."""
        self._ensure_encoded_str()
        return self._encoded_str.startswith(str(other))
    @staticmethod
    def is_string(object):
        """True for both native str and rb2py String instances."""
        return isinstance(object, str) or isinstance(object, String)
    def strip(self):
        """Return a copy with leading and trailing whitespace removed."""
        self._ensure_encoded_str()
        return String(self._encoded_str.strip(), encoding=self._encoding)
# lstrip! -> self or nil
# Removes leading whitespace from str, returning nil if no change was made.
def beware_lstrip(self):
self._ensure_encoded_str()
stripped = self._encoded_str.lstrip()
if stripped == self._encoded_str:
# No change
return None
else:
self._encoded_str = stripped
return self
    def rstrip(self):
        """Return a copy with trailing whitespace removed."""
        self._ensure_encoded_str()
        return String(self._encoded_str.rstrip(), encoding=self._encoding)
# Returns the successor to <i>str</i>. The successor is calculated by
# incrementing characters starting from the rightmost alphanumeric (or
# the rightmost character if there are no alphanumerics) in the
# string. Incrementing a digit always results in another digit, and
# incrementing a letter results in another letter of the same case.
# Incrementing nonalphanumerics uses the underlying character set's
# collating sequence.
#
# If the increment generates a ``carry,'' the character to the left of
# it is incremented. This process repeats until there is no carry,
# adding an additional character if necessary.
#
# "abcd".succ #=> "abce"
# "THX1138".succ #=> "THX1139"
# "<<koala>>".succ #=> "<<koalb>>"
# "1999zzz".succ #=> "2000aaa"
# "ZZZ9999".succ #=> "AAAA0000"
# "***".succ #=> "**+"
    def beware_succ(self):
        """In-place Ruby succ! (delegates to the module-level succ helper)."""
        self._modify_encoded_str()
        self._encoded_str = succ(self._encoded_str)
def pack(self, array, format):
format = str(format)
self._ensure_bytes()
formats = []
count = None
for format_char in format:
if format_char.isspace():
continue
if format_char.isalpha():
if count:
formats[-1].count = count
count = None
formats.append(rb2py.pack.get_packer(format_char))
elif format_char.isdigit():
if count:
count = count * 10 + int(format_char)
else:
count = int(format_char)
elif format_char == '*':
if count:
Rb2PyValueError('Unrecognized format "%s"' % format)
count = '*'
else:
raise Rb2PyValueError('Unrecognized format "%s"' % format)
if count:
formats[-1].count = count
index = 0
for format in formats:
elements = array[index:]
format.pack(elements, self)
index += format.total_count
return self
def unpack(self, format):
format = str(format)
self._ensure_bytes()
formats = []
count = None
for format_char in format:
if format_char.isspace():
continue
if format_char.isalpha():
if count is not None:
formats[-1].count = count
count = None
formats.append(rb2py.pack.get_packer(format_char))
elif format_char.isdigit():
if count:
count = count * 10 + int(format_char)
else:
count = int(format_char)
elif format_char == '*':
if count:
Rb2PyValueError('Unrecognized format "%s"' % format)
count = '*'
else:
raise Rb2PyValueError('Unrecognized format "%s"' % format)
if count is not None:
formats[-1].count = count
index = 0
result = []
for packer in formats:
packer.unpack(self._bytes[index:], result)
index += packer.total_byte_size
return result
def replace(self, other):
if not isinstance(other, String):
raise Rb2PyNotImplementedError("String.replace")
other._ensure_bytes()
self._modify_bytes()
self._bytes = other._bytes
self._encoding = other._encoding
return self
    def write_to_file(self, file):
        """Write the raw byte content to a binary-mode file object."""
        self._ensure_bytes()
        file.write(bytes(self._bytes))
| [
"Michal Molhanec"
] | Michal Molhanec |
45b53a61145d6489a9a591e576d1b988773b8c51 | 36741ab59a903051b8c86864f98c36f6e66e2250 | /lesson07.py | 809ae04b3531bcaeaefec87fd758a5ca15713165 | [] | no_license | deceptikon/oneclick | 23802a1bea95251d76475630f22a26c230f125de | 4126628a8d15db620fefcdb388035acd9016f366 | refs/heads/master | 2021-06-24T07:35:39.460186 | 2021-04-09T14:41:50 | 2021-04-09T14:41:50 | 218,316,831 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | # # Урок 7
# ########
#
# # def my_func:
# # pass
# #
# # class Creature:
# # pass
#
# # class Creature:
# # age = 0
# # sex = 'Male'
# #
# # suschestvo = Creature() # создаст объект класса
# # bad_creature = Creature # НЕ ДЕЛАТЬ будет просто ссылаться на сам класс
# # print(type(suschestvo), type(bad_creature))
# #
# # print(suschestvo.sex)
# # suschestvo.test = 'test'
# # print(suschestvo.test)
# #
# # eshe_suschestvo = Creature()
# # eshe_suschestvo.sex = 'Female'
# # #
# # print('Существо: ', suschestvo.sex)
# # print('Еще существо: ', eshe_suschestvo.sex)
#
# M = 'Male'
# F = 'Female'
#
# class Creature:
# age = 0
# sex = 'Male'
#
# def __init__(self, age, sex = 'Male'):
# self.age = age
# self.sex = sex
#
# def show(self):
# print('Это существо, ему', self.age, 'лет и его пол -', self.sex)
# #
# # def __del__(self):
# # print('Сейчас существо будет удалено')
# #
# # s = Creature()
# # s = Creature(5, M)
# # s.show()
# # del(s)
# #
# # while True:
# # pass
# #
# #
# class Man(Creature):
# def __init__(self, age):
# self.sex = M
# self.age = age
# #
# def show(self):
# print('Это мужчина, ему', self.age, 'лет')
#
#
# class Woman(Creature):
# __can_fly = False
# #
# def __init__(self, age):
# self.sex = F
# self.name = 'Mary'
# self.age = age
# #
# def show(self):
# print('Это женщина, ей', self.age, 'лет')
# print('Защищенное свойство', self.__can_fly)
# #
# def normal_method(self):
# self.__protected_method()
#
# def __protected_method(self):
# print('Это защищенный метод, мы не можем его вызвать извне')
# #
# muzhik = Man(48)
# muzhik.show()
# #
# fem = Woman(12)
# fem.show()
# print(fem.age)
# fem.normal_method()
# # fem.__protected_method()
# fem.age = 100
# print(fem.age)
# print(fem.__can_fly)
# создать класс Worker c методом work и свойством salary, создать класс Driver,
# наследующийся от класса Worker, переопределить метод work(), добавить в Driver
# конструктор, который выставляет свойство salary по умолчанию отличное от родительского класса
# добавить деструктор в родительский класс
# создать объекты обеих классов, вызвать методы work
class Worker:
    """Base worker with a class-level default salary (lesson exercise)."""
    salary = 5000  # default salary shared by all workers
    def work(self):
        """Print the worker's salary (message text is Russian by design)."""
        print('Зарплата: ', self.salary)
    def __del__(self):
        """Destructor demo: prints a notice when the instance is collected."""
        print('Это деструктор')
class Driver(Worker):
    """Driver overrides work() and sets its own default salary in __init__."""
    # salary = 300
    def __init__(self):
        # Instance attribute shadows the inherited class attribute.
        self.salary = 500
    def work(self):
        """Print the driver's salary (overrides Worker.work)."""
        print('Зарплата водителя: ', self.salary)
# Demo: create one of each class and show method overriding plus
# rebinding of the per-instance salary attribute.
obj_worker = Worker()
obj_driver = Driver()
obj_worker.work()
obj_driver.work()
obj_driver.salary = 15000  # rebind the instance attribute
obj_driver.work()
| [
"[email protected]"
] | |
225cfb2e22470170ea6dd0d2f269caea5c0a1559 | 7f44476812eb4f67cfc09bb1a983456cfaf769c2 | /code/numpy_neural_net.py | ea21cde783d7a9b209d0d9a7b2bd9a9a53e33adc | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | jasonwei20/jasonwei20.github.io | 4ccba20594f81d1c0293b570b58456b47fb44a44 | cddac426b42f90e3363ef5ae35a296d800fc39ad | refs/heads/master | 2023-01-19T06:37:05.686731 | 2023-01-15T04:33:23 | 2023-01-15T04:33:23 | 120,841,808 | 3 | 9 | MIT | 2022-07-22T01:17:11 | 2018-02-09T01:47:52 | HTML | UTF-8 | Python | false | false | 5,558 | py | #Big credits: https://towardsdatascience.com/lets-code-a-neural-network-in-plain-numpy-ae7e74410795
import numpy as np
def sigmoid(Z):
    """Elementwise logistic sigmoid: 1 / (1 + e^-Z)."""
    negative_exponentials = np.exp(-Z)
    return 1 / (1 + negative_exponentials)
def relu(Z):
    """Elementwise rectified linear unit: max(0, Z)."""
    return np.clip(Z, 0, None)
def sigmoid_backward(dA, Z):
    """Backprop through sigmoid: dA * s(Z) * (1 - s(Z))."""
    activation = sigmoid(Z)
    return dA * activation * (1 - activation)
def relu_backward(dA, Z):
    """Backprop through ReLU: pass dA through where Z > 0, zero elsewhere."""
    grad = np.array(dA, copy=True)
    np.copyto(grad, 0, where=(Z <= 0))
    return grad
def convert_prob_into_class(probs):
    """Threshold probabilities at 0.5 into hard 0/1 labels (0.5 maps to 0)."""
    return (probs > 0.5).astype(probs.dtype)
def get_activation(activation):
    """Look up an activation function (or its backward pass) by name."""
    known = {
        "relu": relu,
        "relu_backward": relu_backward,
        "sigmoid": sigmoid,
        "sigmoid_backward": sigmoid_backward,
    }
    if activation not in known:
        raise Exception('Non-supported activation function', activation)
    return known[activation]
def get_cost_value(Y_hat, Y):
    """Binary cross-entropy cost averaged over the m training examples."""
    m = Y_hat.shape[1]
    positive_term = np.dot(Y, np.log(Y_hat).T)
    negative_term = np.dot(1 - Y, np.log(1 - Y_hat).T)
    cost = -1 / m * (positive_term + negative_term)
    return float(np.squeeze(cost))
def get_accuracy_value(Y_hat, Y):
    """Fraction of sample columns whose thresholded prediction matches Y exactly."""
    predicted = convert_prob_into_class(Y_hat)
    return (predicted == Y).all(axis=0).mean()
# Layer specification consumed by init_layers and the forward/backward
# passes: each dict gives the layer's fan-in, fan-out and activation name.
nn_architecture = [
    {"input_dim": 2, "output_dim": 4, "activation": "relu"},
    {"input_dim": 4, "output_dim": 6, "activation": "relu"},
    {"input_dim": 6, "output_dim": 6, "activation": "relu"},
    {"input_dim": 6, "output_dim": 4, "activation": "relu"},
    {"input_dim": 4, "output_dim": 1, "activation": "sigmoid"},
]
def init_layers(nn_architecture, seed=1):
    """Randomly initialize W{i}/b{i} for every layer, scaled by 0.3.

    Seeding makes initialization reproducible.  Cleanup: removed the unused
    ``num_layers`` local and the manual ``idx + 1`` arithmetic in favor of
    ``enumerate(..., start=1)``.
    """
    np.random.seed(seed)
    params_values = {}
    for layer_number, layer in enumerate(nn_architecture, start=1):
        fan_in = layer["input_dim"]
        fan_out = layer["output_dim"]
        params_values[f"W{layer_number}"] = np.random.randn(fan_out, fan_in) * 0.3
        params_values[f"b{layer_number}"] = np.random.randn(fan_out, 1) * 0.3
    return params_values
def forward(A_prev, W_curr, b_curr, activation):
    """Single-layer forward pass: returns (activated output, pre-activation)."""
    apply_activation = get_activation(activation)
    pre_activation = np.dot(W_curr, A_prev) + b_curr
    activated = apply_activation(pre_activation)
    return activated, pre_activation
def network_forward(X, params_values, nn_architecture):
    """Full forward pass; returns the output and a memory dict of A/Z per layer."""
    memory = {"A0": X}
    A_curr = X
    for layer_number, layer in enumerate(nn_architecture, start=1):
        A_prev = A_curr
        A_curr, Z_curr = forward(
            A_prev,
            params_values[f"W{layer_number}"],
            params_values[f"b{layer_number}"],
            layer["activation"],
        )
        memory[f"A{layer_number}"] = A_curr
        memory[f"Z{layer_number}"] = Z_curr
    return A_curr, memory
def backprop(dA_curr, W_curr, b_curr, Z_curr, A_prev, activation):
    """Single-layer backward pass: gradients w.r.t. A_prev, W and b."""
    # NOTE(review): b_curr is unused; apparently kept for signature symmetry
    # with forward() -- confirm before removing.
    m = A_prev.shape[1]
    # this will use the chain rule: dZ = dA * g'(Z)
    backward_activation_func = get_activation(activation)
    # how much the outputs should change (dZ_curr)
    # is the product of
    # (1) how much the activated output should change (dA_curr)
    # (2) (the derivative of the activation function) applied to Z_curr
    dZ_curr = backward_activation_func(dA_curr, Z_curr)
    # how much the current weights should change (dW_curr)
    # is the product of
    # (1) how much the output should change (dZ_curr)
    # (2) the input for this layer (A_prev)
    dW_curr = np.dot(dZ_curr, A_prev.T) / m
    db_curr = np.sum(dZ_curr, axis=1, keepdims=True) / m
    # how much the activated outputs from the previous layer should change
    # is the product of
    # (1) the weights at this layer (W_curr)
    # (2) how much the outputs of this layer should change
    dA_prev = np.dot(W_curr.T, dZ_curr)
    return dA_prev, dW_curr, db_curr
def network_backprop(Y_hat, Y, memory, params_values, nn_architecture):
    """Full backward pass: gradients dW{i}/db{i} for every layer.

    Cleanup: removed an unused local ``m`` -- each layer's backprop()
    computes its own batch size from A_prev.
    """
    grad_values = {}
    Y = Y.reshape(Y_hat.shape)
    # Derivative of the binary cross-entropy loss w.r.t. Y_hat.
    dA_prev = - (np.divide(Y, Y_hat)) + np.divide(1 - Y, 1 - Y_hat)
    for layer_idx_prev, layer in reversed(list(enumerate(nn_architecture))):
        layer_idx_curr = layer_idx_prev + 1
        activation_function_curr = str(layer["activation"] + "_backward")
        dA_curr = dA_prev
        A_prev = memory[f"A{layer_idx_prev}"]
        Z_curr = memory[f"Z{layer_idx_curr}"]
        W_curr = params_values[f"W{layer_idx_curr}"]
        b_curr = params_values[f"b{layer_idx_curr}"]
        dA_prev, dW_curr, db_curr = backprop(dA_curr, W_curr, b_curr, Z_curr, A_prev, activation_function_curr)
        grad_values[f"dW{layer_idx_curr}"] = dW_curr
        grad_values[f"db{layer_idx_curr}"] = db_curr
    return grad_values
def update_network(params_values, grad_values, nn_architecture, learning_rate=0.001):
    """One gradient-descent step over every layer's parameters (in place)."""
    for layer_number in range(1, len(nn_architecture) + 1):
        params_values[f"W{layer_number}"] -= learning_rate * grad_values[f"dW{layer_number}"]
        params_values[f"b{layer_number}"] -= learning_rate * grad_values[f"db{layer_number}"]
    return params_values
def train_network(X, epochs=3000):
    """Train the module-level nn_architecture on (X, Y) by gradient descent.

    NOTE(review): Y is read from module scope (set in the __main__ block),
    not passed as a parameter -- confirm before reusing outside this script.
    Fix: removed a redundant forward pass that ran unconditionally before
    the training loop (its result was immediately recomputed by the first
    iteration); it is kept only as a fallback so epochs <= 0 still prints
    valid predictions.
    """
    params_values = init_layers(nn_architecture)
    cost_history = []
    acc_history = []
    for epoch in range(epochs):
        A_curr, memory = network_forward(X, params_values, nn_architecture)
        grad_values = network_backprop(A_curr, Y, memory, params_values, nn_architecture)
        params_values = update_network(params_values, grad_values, nn_architecture)
        if epoch % 100 == 0:
            cost_history.append(get_cost_value(A_curr, Y))
            acc_history.append(get_accuracy_value(A_curr, Y))
    if epochs <= 0:
        A_curr, _ = network_forward(X, params_values, nn_architecture)
    print(A_curr)
    print(f"cost_history: {cost_history}")
    print(f"acc_history: {acc_history}")
if __name__ == "__main__":
    # 6 random 2-feature samples; Y is intentionally module-level because
    # train_network reads it as a global.
    X = np.random.randn(2, 6) * 100
    Y = np.array([[0, 0, 0, 1, 1, 1]])
train_network(X) | [
"[email protected]"
] | |
9f6db7b06f164bbe448fbc848fc5f5bc757b6f5c | 6e11ca95e57deff6ee40db3db1c0702a4d47b66f | /Strings/reverse_number.py | c797d8d948c4ddb1a4799b1ee596c2b632f0aa68 | [] | no_license | Jrimmer95/Leetcode- | 38e1ab327d9f5a2dab54653b845b3864e0d5253b | 40a64f066b645f710f3d1f84b9ebcb1f8c2d7d3a | refs/heads/master | 2022-02-03T05:40:24.332294 | 2019-07-22T13:51:13 | 2019-07-22T13:51:13 | 198,221,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | """
Given a 32-bit signed integer, reverse digits of an integer.
Note:
Assume we are dealing with an environment which could only store integers within the 32-bit signed integer
range: [−2**31, 2**31 − 1]. For the purpose of this problem, assume that your function returns 0 when the reversed
integer overflows.
"""
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of x, preserving the sign.

        Returns 0 when the reversed value falls outside the signed 32-bit
        range [-2**31, 2**31 - 1], per the problem statement.  Cleanup:
        the duplicated positive/negative branches are merged into one path
        using a sign factor.
        """
        sign = -1 if x < 0 else 1
        # Reversing via string slicing also drops trailing zeros (120 -> 21).
        result = sign * int(str(abs(x))[::-1])
        if result < -(2 ** 31) or result > 2 ** 31 - 1:
            return 0
        return result
| [
"[email protected]"
] | |
5d0d50d8521b8a43d42e7e003ebaf649fa2a5bff | e2a2e5932e144a6153e1319594e717c1211386eb | /Proyecto1/urls.py | 089404e8f83505ad2996b6c568f685ce7a3f1ee3 | [] | no_license | sonimik13/pildoras | d1d2ccc3467dad5e9289715ae892baf5dc36165f | b041930080ca919132c764568c0e182735024436 | refs/heads/master | 2022-02-28T18:49:10.337161 | 2019-11-22T01:33:28 | 2019-11-22T01:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | """Proyecto1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from Proyecto1.views import saludo, despedida, ejemplo3, ejemplo4, dameFecha, calculaEdad
urlpatterns = [ # list of URL route entries for the project
    path('admin/', admin.site.urls),
    path('saludo/', saludo),
    path('despedida/', despedida),
    path('ejemplo3/', ejemplo3),
    path('ejemplo4/', ejemplo4),
    path('fecha/', dameFecha),
    path('edades/<int:edad>/<int:anhoFuturo>', calculaEdad), # <int:...> converters mark both URL segments as integers
]
]
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.