content
stringlengths
7
1.05M
#!/usr/bin/python3


def recsum(n):
    """Return the sum 0 + 1 + ... + n (returns n unchanged when n <= 1).

    Uses the closed-form Gauss formula n*(n+1)/2 instead of recursion, so
    large inputs no longer blow Python's recursion limit (the original
    recursed once per integer).
    """
    return n if n <= 1 else n * (n + 1) // 2


if __name__ == "__main__":  # guard: importing this module no longer blocks on input()
    n = int(input("Enter your number\t"))
    if n < 0:
        print("Enter a positive number")
    else:
        print("The sum is", recsum(n))
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """
        :type s: str
        :rtype: int

        Length of the longest substring of `s` without repeating characters.

        Sliding-window scan: `last_seen` maps each character to its most
        recent index, and `left` is the index just before the current
        window. Fixes the original, which used dict.has_key() (removed in
        Python 3), shadowed the built-in max(), and stopped one character
        early (`while right < len(s)-1`), under-counting inputs whose best
        window includes the final character.
        """
        last_seen = {}
        left = -1   # index just before the start of the current window
        best = 0
        for right, ch in enumerate(s):
            # If ch was seen inside the current window, shrink from the left.
            if ch in last_seen and last_seen[ch] > left:
                left = last_seen[ch]
            last_seen[ch] = right
            best = max(best, right - left)
        return best
# -*- coding: utf-8 -*- """ @Datetime: 2019/1/2 @Author: Zhang Yafei """
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = 'MiracleWong'

# OOP: multiple inheritance via the MixIn pattern.
# Reference: https://www.liaoxuefeng.com/wiki/1016959663602400/1017502939956896


class Animal(object):
    pass


# Broad taxonomic categories:
class Mammal(Animal):
    pass


class Bird(Animal):
    pass


# Capability mixins:
class Runnable(object):
    def run(self):
        print('Running...')


class Flyable(object):
    def fly(self):
        print('Flying...')


# Concrete animals combine a category with a capability:
class Dog(Mammal, Runnable):
    pass


class Bat(Mammal, Flyable):
    pass


class Parrot(Bird, Flyable):  # fixed: original inherited from the undefined name `Fly`
    pass


class Ostrich(Bird):
    pass


# The server examples from the tutorial referenced classes that are never
# imported here (TCPServer, UDPServer, ForkingMixIn, ThreadingMixIn,
# CoroutineMixIn), so the module crashed with NameError at import time; the
# original also defined MyTCPServer twice, silently discarding the first.
# They are kept as illustrative comments only:
#
# class MyTCPServer(TCPServer, ForkingMixIn):
#     pass
#
# class MyUDPServer(UDPServer, ThreadingMixIn):
#     pass
#
# class MyCoroutineTCPServer(TCPServer, CoroutineMixIn):
#     pass
def sphere_volume(raio):
    """Return the volume of a sphere of radius `raio`.

    Uses pi = 3.14159, the fixed constant the original exercise requires
    (not math.pi, which would change the printed answer).
    """
    pi = 3.14159
    return (4 / 3) * pi * (raio ** 3)


if __name__ == "__main__":  # guard so importing does not block on input()
    VOLUME = sphere_volume(float(input()))
    print("VOLUME = {:.3f}".format(VOLUME))
def valor_aluguel(dias, km):
    """Return the car-rental price: R$60.00 per day plus R$0.15 per km driven."""
    return dias * 60 + km * 0.15


if __name__ == "__main__":  # guard so importing does not block on input()
    dias = float(input('Quantos dias o carro foi alugado? '))
    km = float(input('Quantos Km rodados? '))
    # Original clobbered `dias` and `km` in place; computing in the helper
    # keeps the raw inputs intact.
    print('O valor a pagar pelo aluguel é de {:.2f}'.format(valor_aluguel(dias, km)))
# automatically generated by the FlatBuffers compiler, do not modify # namespace: proto class AuthMethod(object): ANONYMOUS = 0 COOKIE = 1 TLS = 2 TICKET = 3 CRA = 4 SCRAM = 5 CRYPTOSIGN = 6
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


class BaseError(Exception):
    """Base error for all test runner errors."""

    def __init__(self, message, is_infra_error=False):
        super(BaseError, self).__init__(message)
        # Store the message ourselves: Exception.message was removed in
        # Python 3, so the original __eq__ raised AttributeError there.
        self._message = message
        self._is_infra_error = is_infra_error

    def __eq__(self, other):
        return (self.message == other.message
                and self.is_infra_error == other.is_infra_error)

    def __ne__(self, other):
        return not self == other

    @property
    def message(self):
        """The human-readable error message passed at construction."""
        return self._message

    @property
    def is_infra_error(self):
        """Property to indicate if error was caused by an infrastructure issue."""
        return self._is_infra_error
def preco_passagem(km):
    """Return the ticket price in R$: 0.50/km up to 200 km, 0.45/km beyond."""
    return km * 0.45 if km > 200 else km * 0.50


if __name__ == "__main__":  # guard so importing does not block on input()
    km = int(input('Qual a distancia em Km para o destino?'))
    print('O preço da passagem é de R${}'.format(preco_passagem(km)))
def _cell(row, col):
    # True where the 6x4 letter-"E" pattern places an asterisk.
    return (row == 2
            or (row == 1 and col % 3 != 0)
            or (row == 4 and col > 0)
            or (col == 0 and row == 3))


def for_e():
    """Print a 6x4 asterisk letter-"E" pattern using for loops."""
    for row in range(6):
        cells = ["*" if _cell(row, col) else " " for col in range(4)]
        print(" ".join(cells), end=" \n")


def while_e():
    """Print the same 6x4 asterisk pattern using while loops."""
    row = 0
    while row < 6:
        col = 0
        cells = []
        while col < 4:
            cells.append("*" if _cell(row, col) else " ")
            col += 1
        print(" ".join(cells), end=" \n")
        row += 1
""" Copyright 2017 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. EventMatch object. Describes a single matched event for EventMatcher. """ class EventMatch(object): # pylint: disable=too-few-public-methods """ EventMatcher callback object """ def __init__(self, ref, event_data, match): """ :param ref: reference object :param event_data: original event data which matches :param match: re.MatchObject or string depend on EventMatcher configuration """ self.ref = ref self.event_data = event_data self.match = match
# Faça um programa que leia um número inteiro qualquer e mostre na tela a sua tabuada.
# (The original kept a fully commented-out copy of the solution and eleven
# copy-pasted print calls; both are replaced with a single loop.)


def tabuada(num):
    """Return the multiplication-table lines for `num` (0 through 10),
    formatted exactly as the original prints did ('{} x {:2} = {}')."""
    return ['{} x {:2} = {}'.format(num, i, num * i) for i in range(11)]


if __name__ == "__main__":  # guard so importing does not block on input()
    num = int(input("Digite um Nº para verificar sua tabuada: \n"))
    print('--------------')
    for linha in tabuada(num):
        print(linha)
    print('--------------')
def selection_sort(elements):
    """Sort `elements` in place with selection sort and return the list.

    Each pass moves the smallest remaining item into the next slot.
    """
    size = len(elements)
    for slot in range(size - 1):
        # Index of the minimum over the unsorted tail (first occurrence wins,
        # matching the strict-comparison scan of a classic selection sort).
        smallest = min(range(slot, size), key=elements.__getitem__)
        elements[slot], elements[smallest] = elements[smallest], elements[slot]
    return elements


ele = [23, 35, 6, 42, 1, 4, 6, 97, 10]
print(selection_sort(ele))
def ValidaCpf(msg='Cadastro de Pessoa Física (CPF): ', pont=True):
    """
    -> Read and validate a CPF (Brazilian taxpayer ID) from the user.
    :param msg: Prompt shown to the user before reading the CPF.
    :param pont: If True, return the CPF with punctuation (xxx.xxx.xxx-xx).
                 If False, return it without punctuation (xxxxxxxxxxx).
    :return: A validated CPF string.
    """
    while True:
        cpf = str(input(f'{msg}'))
        # NOTE(review): `'.-' in cpf` tests for the literal substring ".-",
        # not "contains dots or hyphens", and str.replace returns a new
        # string that is discarded here — this branch never actually strips
        # punctuation. Confirm the intended behavior for pont=False.
        if '.-' in cpf and pont == False:
            cpf.replace('.', '')
            cpf.replace('-', '')
        contDig = 0
        for dig in cpf:
            if dig.isnumeric():
                contDig += 1  # Count the digits in the CPF
        if contDig != 11:  # A CPF must contain exactly 11 digits
            print('\033[1;31m3RRO! Este CPF é inválido!\033[m')
            continue  # Back to the top of the loop
        if '.' in cpf:  # If dots are present, there must be exactly 2
            if cpf.count('.') != 2:
                print('\033[1;31m3RRO! Este CPF é inválido!\033[m')
                continue
        else:  # No dots: if pont=True, insert them
            if pont:
                cpf = list(cpf)
                cpf.insert(3, '.')
                cpf.insert(7, '.')
        if '-' in cpf:  # If a hyphen is present, there must be exactly 1
            if cpf.count('-') != 1:
                print('\033[1;31m3RRO! Este CPF é inválido!\033[m')
                continue
        else:  # No hyphen: if pont=True, insert it
            if pont:
                cpf.insert(11, '-')
        result = [''.join(cpf)]  # Join the list back into a string
        cpf = result[0]
        break
    return cpf


cpf = ValidaCpf(msg='CPF: ', pont=True)
print(cpf)
help(ValidaCpf)  # Show the function's docstring
# Print the multiplication tables 1..10, one tab-separated row per table.
tab = 1
while tab <= 10:
    print("Tabuada do", tab, ":", end="\t")
    # Products tab*1 .. tab*10, joined with tabs (same bytes as the
    # original per-product prints with end="\t").
    linha = "\t".join(str(tab * mult) for mult in range(1, 11))
    print(linha, end="\t\n")
    tab = tab + 1
# -*- coding: utf-8 -*- { 'name': 'GST Pos Order Line Menu', 'version': '12.0.0.4', 'sequence': 1, 'category': 'Sales', "author": "Guadaltech Soluciones tecnológicas S.L.", 'depends': ['point_of_sale', 'tis_catch_weight', 'tis_catch_weight_extension'], 'data': ['views/pos_menu.xml'], 'installable': True, 'auto_install': False, 'application': True }
load("@bazel_skylib//lib:shell.bzl", "shell") def kubebuilder_manifests(name, srcs, config_root, **kwargs): native.genrule( name = name, srcs = srcs, outs = [name + ".yaml"], cmd = """ tmp=$$(mktemp --directory) cp -aL "%s/." "$$tmp" $(location @io_k8s_sigs_kustomize_kustomize_v4//:v4) build "$$tmp/default" > $@ rm -r "$$tmp" """ % config_root, tools = [ "@io_k8s_sigs_kustomize_kustomize_v4//:v4", ], **kwargs ) def _ginkgo_test_impl(ctx): wrapper = ctx.actions.declare_file(ctx.label.name) ctx.actions.write( output = wrapper, content = """#!/usr/bin/env bash set -e exec {ginkgo} {ginkgo_args} {go_test} -- "$@" """.format( ginkgo = shell.quote(ctx.executable._ginkgo.short_path), ginkgo_args = " ".join([shell.quote(arg) for arg in ctx.attr.ginkgo_args]), # Ginkgo requires the precompiled binary end with ".test". go_test = shell.quote(ctx.executable.go_test.short_path + ".test"), ), is_executable = True, ) return [DefaultInfo( executable = wrapper, runfiles = ctx.runfiles( files = ctx.files.data, symlinks = {ctx.executable.go_test.short_path + ".test": ctx.executable.go_test}, transitive_files = depset([], transitive = [ctx.attr._ginkgo.default_runfiles.files, ctx.attr.go_test.default_runfiles.files]), ), )] ginkgo_test = rule( implementation = _ginkgo_test_impl, attrs = { "data": attr.label_list(allow_files = True), "go_test": attr.label(executable = True, cfg = "target"), "ginkgo_args": attr.string_list(), "_ginkgo": attr.label(default = "@com_github_onsi_ginkgo//ginkgo", executable = True, cfg = "target"), }, executable = True, test = True, )
def chk_p5m(n):
    # NOTE(review): despite the name, this returns n when n == 1 or when n is
    # COMPOSITE (the early `return n` fires on the first divisor found in
    # 2..n-1), and returns 0 for primes and for multiples of 5 — confirm this
    # filter is really what was intended.
    if n % 5 == 0:
        return 0
    elif n == 1:
        return n
    for i in range(2, n):
        if n % i == 0:
            return n
    return 0


def fab(n):
    # Produces the first n Fibonacci values (starting at 1), each passed
    # through chk_p5m. The walrus assignment rebinds f to the last two
    # Fibonacci numbers on every iteration.
    f = [0, 1]
    return [chk_p5m((f := [f[-1], f[-1] + f[-2]])[0]) for i in range(n)]
    # i know it's little confusing most won't understand... but tried to do something unique


print(*fab(int(input())))
'''Exercício Python 072: Crie um programa que tenha uma dupla totalmente preenchida com
uma contagem por extenso, de zero até vinte. Seu programa deverá ler um número pelo teclado
(entre 0 e 20) e mostrá-lo por extenso.'''
# ----------------------------------------------------

# Spelled-out numbers 0..20 (fixed typo: was 'quartoze', correct is 'quatorze').
tupla20 = ('zero', 'um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete',
           'oito', 'nove', 'dez', 'onze', 'doze', 'treze', 'quatorze',
           'quinze', 'dezesseis', 'dezessete', 'dezoito', 'dezenove', 'vinte')


def por_extenso(numero):
    """Return the Portuguese spelling of `numero`, which must be in 0..20."""
    return tupla20[numero]


if __name__ == "__main__":  # guard so importing does not block on input()
    print('-' * 40)
    numero = int(input('Digite um número entre "0" e "20" >>> '))
    while numero not in range(0, 21):
        numero = int(input('ERRO. Digite novamente. Nº entre "0" e "20" >>> '))
    print(f'Você digitou o número "{por_extenso(numero)}".')
    print('-' * 40)
'''2) Faça um programa, com uma função que necessite de um argumento.
A função retorna o valor de caractere ‘P’, se seu argumento for positivo,
e ‘N’, se seu argumento for zero ou negativo.'''


def pn(x):
    """Return 'P' when x is positive, 'N' when x is zero or negative.

    Fixes the original, which returned the string "0" for x == 0 even
    though its own exercise statement requires 'N' for zero.
    """
    return "P" if x > 0 else "N"


if __name__ == "__main__":  # guard so importing does not block on input()
    num = int(input("Digite um numero: "))
    print(pn(num))
def contar_divisores(n):
    """Return how many integers in 1..n divide n evenly.

    A value of 2 (only 1 and n itself) means n is prime.
    """
    return sum(1 for c in range(1, n + 1) if n % c == 0)


if __name__ == "__main__":  # guard so importing does not block on input()
    n = int(input('Digite um numero :'))
    count = contar_divisores(n)
    for c in range(1, n + 1):
        # Red for divisors, default color otherwise (ANSI escapes).
        if n % c == 0:
            print('\033[31m', end='')
        else:
            print('\033[m', end='')
        print('{}'.format(c), end=' ')
    print('\n \033[mo numero {} pode ser dividido {} vezes'.format(n, count))
    if count == 2:
        print(' o numero {} é primo'.format(n))
    else:
        print(' o numero {} não é primo'.format(n))
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Environment Base Class

__author__: Conor Heins, Alexander Tschantz, Brennan Klein

"""


class Env(object):
    """Abstract environment interface.

    Subclasses must implement reset() and step(); render() and
    sample_action() are optional no-ops; the model-specification accessors
    raise ValueError unless a subclass provides a generative model.
    """

    def reset(self, state=None):
        raise NotImplementedError

    def step(self, action):
        raise NotImplementedError

    def render(self):
        pass

    def sample_action(self):
        pass

    def _no_model(self):
        # Shared failure path for all model-specification accessors below.
        raise ValueError(
            "<{}> does not provide a model specification".format(type(self).__name__)
        )

    def get_likelihood_dist(self):
        return self._no_model()

    def get_transition_dist(self):
        return self._no_model()

    def get_uniform_posterior(self):
        return self._no_model()

    def get_rand_likelihood_dist(self):
        return self._no_model()

    def get_rand_transition_dist(self):
        return self._no_model()

    def __str__(self):
        return "<{} instance>".format(type(self).__name__)
class Solution:
    def largestValsFromLabels(self, values, labels, num_wanted, use_limit):
        """Greedy maximum: take items in descending value order, honoring
        the per-label cap `use_limit` and the overall cap `num_wanted`."""
        used = dict.fromkeys(labels, 0)   # per-label pick counts
        remaining = num_wanted
        total = 0
        for value, label in sorted(zip(values, labels), reverse=True):
            if remaining == 0:
                break
            if used[label] < use_limit:
                total += value
                used[label] += 1
                remaining -= 1
        return total
# Testing the function


def res(a, b):
    """Return the product of a and b."""
    return a * b


res(10, 10)


def test_func_res():
    # Sanity check collected by pytest.
    assert res(10, 10) == 100
""" Package contains: Database Class Decoder Class Cleaner Class MyHTMLParser Class """
class Human():
    """Simple demo class: a named person with an age."""

    # Class-level counter shared by all instances. Kept under this name for
    # interface compatibility, although it shadows the builtin `sum`.
    sum = 0

    def __init__(self, name, age):
        self.age = age
        self.name = name

    def do_homework(self):
        """Placeholder behavior meant to be overridden by subclasses."""
        print('parent method')

    def get_name(self):
        """Print (not return) this person's name."""
        print(self.name)
d = { "no": "yes" } class CustomError: def __init__(self, fun): self.fun = fun def __call__(self, *args, **kwargs): try: return self.fun(*args, **kwargs) except Exception as e: print(e) raise Exception(d.get(str(e))) @CustomError def a(): raise Exception("no")
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. { 'name': 'Documentation', 'category': 'Website', 'summary': 'Forum, Documentation', 'description': """ Documentation based on question and pertinent answers of Forum """, 'depends': [ 'website_forum' ], 'data': [ 'data/doc_data.xml', 'security/ir.model.access.csv', 'views/doc.xml', 'views/website_doc.xml', ], 'demo': [ 'data/doc_demo.xml', ], }
# Databricks notebook source # MAGIC %md # MAGIC <img src="https://github.com/billkellett/flight-school-resources/blob/master/images/databricks icon.png?raw=true" width=100/> # MAGIC <img src="/files/flight/Megacorp.png?raw=true" width=200/> # MAGIC # Democratizing MegaCorp's Data # MAGIC # MAGIC ## MegaCorp's current challenges # MAGIC <ul> # MAGIC <li/>Hard to manage and scale infrastructure # MAGIC <li/>Multiple sources of truth because of siloed data # MAGIC <li/>Data Management and Data quality issues # MAGIC <li/>Sub-optimal performance # MAGIC <li/>Limited collaboration between teams # MAGIC </ul> # COMMAND ---------- # MAGIC %md # MAGIC ##Databricks can help! # MAGIC ####A unified Simple, Open, Collaborative Platform for your BI to AI needs # MAGIC # MAGIC <img src="https://databricks.com/wp-content/uploads/2021/10/Databricks-lakehouse-platform-2.png" width=600> # MAGIC <img src="https://databricks.com/wp-content/uploads/2021/09/Platform-image-4.svg"> # COMMAND ---------- # This creates the "team_name" field displayed at the top of the notebook. dbutils.widgets.text("team_name", "Enter your team's name") # COMMAND ---------- # Note that we have factored out the setup processing into a different notebook, which we call here. # As a flight school student, you will probably want to look at the setup notebook. # Even though you'll want to look at it, we separated it out in order to demonstrate a best practice... # ... you can use this technique to keep your demos shorter, and avoid boring your audience with housekeeping. # In addition, you can save demo time by running this initial setup command before you begin your demo. 
# This cell should run in a few minutes or less team_name = dbutils.widgets.get("team_name") setup_responses = dbutils.notebook.run("./includes/flight_school_assignment_1_setup", 0, {"team_name": team_name}).split() local_data_path = setup_responses[0] dbfs_data_path = setup_responses[1] database_name = setup_responses[2] print(f"Path to be used for Local Files: {local_data_path}") print(f"Path to be used for DBFS Files: {dbfs_data_path}") print(f"Database Name: {database_name}") # COMMAND ---------- # Let's set the default database name so we don't have to specify it on every query spark.sql(f"USE {database_name}") # COMMAND ---------- # MAGIC %md # MAGIC # MAGIC #### Let's talk Databricks' Delta Lake # MAGIC # MAGIC #####Delta lake is an open-source project that enables building a Lakehouse Architecture on top of existing storage systems such as S3, ADLS, GCS, and HDFS. # MAGIC # MAGIC Delta Lake brings __*Performance*__ and __*Reliability*__ to Data Lakes # MAGIC # MAGIC Why did Delta Lake have to be invented? Let's take a look... # MAGIC # MAGIC <img src="https://github.com/billkellett/flight-school-resources/blob/master/images/projects_failing.png?raw=true" width=1000/> # MAGIC # MAGIC As the graphic above shows, Big Data Lake projects have a very high failure rate. In fact, Gartner Group estimates that 85% of these projects fail (see https://www.infoworld.com/article/3393467/4-reasons-big-data-projects-failand-4-ways-to-succeed.html ). *Why* is the failure rate so high? # MAGIC # MAGIC <img src="https://github.com/billkellett/flight-school-resources/blob/master/images/projects_failing_reasons.png?raw=true" width=1000/> # MAGIC # MAGIC The graphic above shows the main __*reliability*__ issues with data lakes. Unlike relational databases, typical data lakes are not capable of transactional (ACID) behavior. 
This leads to a number of reliability issues: # MAGIC # MAGIC - When a job fails, incomplete work is not rolled back, as it would be in a relational database. Data may be left in an inconsistent state. This issue is extremely difficult to deal with in production. # MAGIC # MAGIC - Data lakes typically cannot enforce schema. This is often touted as a "feature" called "schema-on-read," because it allows flexibility at data ingest time. However, when downstream jobs fail trying to read corrupt data, we have a very difficult recovery problem. It is often difficult just to find the source application that caused the problem... which makes fixing the problem even harder! # MAGIC # MAGIC - Relational databases allow multiple concurrent users, and ensure that each user gets a consistent view of data. Half-completed transactions never show up in the result sets of other concurrent users. This is not true in a typical data lake. Therefore, it is almost impossible to have a concurrent mix of read jobs and write jobs. This becomes an even bigger problem with streaming data, because streams typically don't pause to let other jobs run! # MAGIC # MAGIC Next, let's look at the key __*performance issues*__ with data lakes... # MAGIC # MAGIC <img src="https://github.com/billkellett/flight-school-resources/blob/master/images/projects_failing_reasons_1.png?raw=true" width=1000/> # MAGIC # MAGIC - We have already noted that data lakes cannot provide a consistent view of data to concurrent users. This is a reliability problem, but it is also a __*performance*__ problem because if we must run jobs one at a time, our production time window becomes extremely limited. # MAGIC # MAGIC - Most data lake engineers have come face-to-face with the "small-file problem." Data is typically ingested into a data lake in batches. Each batch typically becomes a separate physical file in a directory that defines a table in the lake. Over time, the number of physical files can grow to be very large. 
When this happens, performance suffers because opening and closing these files is a time-consuming operation. # MAGIC # MAGIC - Experienced relational database architects may be surprised to learn that Big Data usually cannot be indexed in the same way as relational databases. The indexes become too large to be manageable and performant. Instead, we "partition" data by putting it into sub-directories. Each partition can represent a column (or a composite set of columns) in the table. This lets us avoid scanning the entire data set... *if* our queries are based on the partition column. However, in the real world, analysts are running a wide range of queries which may or may not be based on the partition column. In these scenarios, there is no benefit to partitioning. In addition, partitioning breaks down if we choose a partition column with extremely high cardinality. # MAGIC # MAGIC - Data lakes typically live in cloud storage (e.g., S3 on AWS, ADLS on Azure), and these storage devices are quite slow compared to SSD disk drives. Most data lakes have no capability to cache data on faster devices, and this fact has a major impact on performance. # MAGIC # MAGIC __*Delta Lake was built to solve these reliability and performance problems.*__ First, let's consider how Delta Lake addresses *reliability* issues... # MAGIC # MAGIC <img src="https://github.com/billkellett/flight-school-resources/blob/master/images/delta_reliability.png?raw=true" width=1000/> # MAGIC # MAGIC Note the Key Features in the graphic above. We'll be diving into all of these capabilities as we go through the Workshop: # MAGIC # MAGIC - __ACID Transactions:__ Delta Lake ACID compliance ensures that half-completed transactions are never persisted in the Lake, and concurrent users never see other users' in-flight transactions. # MAGIC # MAGIC - __Mutations:__ Experienced relational database architects may be surprised to learn that most data lakes do not support updates and deletes. 
These lakes concern themselves only with data ingest, which makes error correction and backfill very difficult. In contrast, Delta Lake provides full support for Inserts, Updates, and Deletes. # MAGIC # MAGIC - __Schema Enforcement:__ Delta Lake provides full support for schema enforcement at write time, greatly increasing data reliability. # MAGIC # MAGIC - __Unified Batch and Streaming:__ Streaming data is becoming an essential capability for all enterprises. We'll see how Delta Lake supports both batch and streaming modes, and in fact blurs the line between them, enabling architects to design systems that use both batch and streaming capabilities simultaneously. # MAGIC # MAGIC - __Time Travel:__ unlike most data lakes, Delta Lake enables queries of data *as it existed* at a specific point in time. This has important ramifications for reliability, error recovery, and synchronization with other systems, as we shall see later in this Workshop. # MAGIC # MAGIC We have seen how Delta Lake enhances reliability. Next, let's see how Delta Lake optimizes __*performance*__... # MAGIC # MAGIC <img src="https://github.com/billkellett/flight-school-resources/blob/master/images/delta_performance.png?raw=true" width=1000/> # MAGIC # MAGIC Again, we'll be diving into all these capabilities throughout the Workshop. We'll be concentrating especially on features that are only available in Databricks' distribution of Delta Lake... # MAGIC # MAGIC - __Compaction:__ Delta Lake provides sophisticated capabilities to solve the "small-file problem" by compacting small files into larger units. # MAGIC # MAGIC - __Caching:__ Delta Lake transparently caches data on the SSD drives of worker nodes in a Spark cluster, greatly improving performance. # MAGIC # MAGIC - __Data Skipping:__ this Delta Lake feature goes far beyond the limits of mere partitioning. # MAGIC # MAGIC - __Z-Ordering:__ this is a brilliant alternative to traditional indexing, and further enhances Delta Lake performance. 
# COMMAND ---------- # MAGIC %md # MAGIC <img src="/files/flight/Proposed_Architecture.png?raw=true" width=1200/> # COMMAND ---------- # Read the downloaded historical data into a dataframe # This is MegaCorp data regarding power plant device performance. It pre-dates our new IOT effort, but we want to save this data and use it in queries. dataPath = f"dbfs:/FileStore/flight/{team_name}/assignment_1_ingest.csv" df = spark.read.option("header","true").option("inferSchema","true").csv(dataPath) #display(df) # Read the downloaded backfill data into a dataframe # This is some backfill data that we'll need to merge into the main historical data. backfillDataPath = f"dbfs:/FileStore/flight/{team_name}/assignment_1_backfill.csv" df_backfill = spark.read.option("header","true").option("inferSchema","true").csv(backfillDataPath) #display(df_backfill) # Create a temporary view on the dataframes to enable SQL df.createOrReplaceTempView("historical_bronze_vw") df_backfill.createOrReplaceTempView("historical_bronze_backfill_vw") # COMMAND ---------- # MAGIC %sql # MAGIC # MAGIC -- Create a Delta Lake table for the main bronze table # MAGIC # MAGIC DROP TABLE IF EXISTS sensor_readings_historical_bronze; # MAGIC # MAGIC CREATE TABLE sensor_readings_historical_bronze # MAGIC AS SELECT * FROM historical_bronze_vw; # COMMAND ---------- # MAGIC %sql # MAGIC # MAGIC -- Let's take a peek at our new bronze table # MAGIC # MAGIC SELECT * FROM sensor_readings_historical_bronze # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's count the records in the Bronze table # MAGIC # MAGIC SELECT COUNT(*) FROM sensor_readings_historical_bronze # COMMAND ---------- # MAGIC %sql # MAGIC # MAGIC -- Analysing data? No problem! 
Let's take a look # MAGIC # MAGIC SELECT # MAGIC count(*) as count, device_operational_status # MAGIC FROM sensor_readings_historical_bronze # MAGIC GROUP BY device_operational_status # MAGIC ORDER BY count asc; # COMMAND ---------- # MAGIC %sql # MAGIC -- Now let's make a query that accepts run-time parameters. # MAGIC -- NOTE that we have set default values so that a default query will return results on this data # MAGIC # MAGIC CREATE WIDGET DROPDOWN PARAM_END_SECOND # MAGIC DEFAULT '57' # MAGIC CHOICES SELECT DISTINCT SECOND(reading_time) AS end_second FROM sensor_readings_historical_bronze ORDER BY end_second ASC; # MAGIC CREATE WIDGET DROPDOWN PARAM_START_SECOND # MAGIC DEFAULT '54' # MAGIC CHOICES SELECT DISTINCT SECOND(reading_time) AS start_second FROM sensor_readings_historical_bronze ORDER BY start_second ASC; # MAGIC CREATE WIDGET DROPDOWN PARAM_MINUTE # MAGIC DEFAULT '18' # MAGIC CHOICES SELECT DISTINCT MINUTE(reading_time) AS minute FROM sensor_readings_historical_bronze ORDER BY minute ASC; # MAGIC CREATE WIDGET DROPDOWN PARAM_HOUR # MAGIC DEFAULT '10' # MAGIC CHOICES SELECT DISTINCT HOUR(reading_time) AS hour FROM sensor_readings_historical_bronze ORDER BY hour ASC; # MAGIC CREATE WIDGET DROPDOWN PARAM_DAY # MAGIC DEFAULT '23' # MAGIC CHOICES SELECT DISTINCT DAY(reading_time) AS day FROM sensor_readings_historical_bronze ORDER BY day ASC; # MAGIC CREATE WIDGET DROPDOWN PARAM_MONTH # MAGIC DEFAULT '2' # MAGIC CHOICES SELECT DISTINCT MONTH(reading_time) AS month FROM sensor_readings_historical_bronze ORDER BY month ASC; # MAGIC CREATE WIDGET DROPDOWN PARAM_YEAR # MAGIC DEFAULT '2015' # MAGIC CHOICES SELECT DISTINCT YEAR(reading_time) AS year FROM sensor_readings_historical_bronze ORDER BY year ASC; # MAGIC CREATE WIDGET DROPDOWN PARAM_DEVICE_ID # MAGIC DEFAULT '7G007R' # MAGIC CHOICES SELECT DISTINCT device_id FROM sensor_readings_historical_bronze ORDER BY device_id ASC; # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's make a query that shows 
another meaningful graphical view of the table # MAGIC -- We'll parameterize this query so a Business Analyst can examine fine-grained device performance issues # MAGIC -- Experiment with different graphical views # MAGIC # MAGIC SELECT # MAGIC reading_time, # MAGIC reading_1, # MAGIC reading_2, # MAGIC reading_3 # MAGIC FROM sensor_readings_historical_bronze # MAGIC WHERE # MAGIC device_id = getArgument("PARAM_DEVICE_ID") # MAGIC AND # MAGIC YEAR(reading_time) = getArgument("PARAM_YEAR") # MAGIC AND # MAGIC MONTH(reading_time) = getArgument("PARAM_MONTH") # MAGIC AND # MAGIC DAY(reading_time) = getArgument("PARAM_DAY") # MAGIC AND # MAGIC HOUR(reading_time) = getArgument("PARAM_HOUR") # MAGIC AND # MAGIC MINUTE(reading_time) = getArgument("PARAM_MINUTE") # MAGIC AND # MAGIC SECOND(reading_time) BETWEEN getArgument("PARAM_START_SECOND") # MAGIC AND getArgument("PARAM_END_SECOND") # MAGIC ORDER BY reading_time ASC # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's clean up that messy collection of widgets! 
# MAGIC # MAGIC REMOVE WIDGET PARAM_DEVICE_ID; # MAGIC REMOVE WIDGET PARAM_YEAR; # MAGIC REMOVE WIDGET PARAM_MONTH; # MAGIC REMOVE WIDGET PARAM_DAY; # MAGIC REMOVE WIDGET PARAM_HOUR; # MAGIC REMOVE WIDGET PARAM_MINUTE; # MAGIC REMOVE WIDGET PARAM_START_SECOND; # MAGIC REMOVE WIDGET PARAM_END_SECOND; # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's take a peek at the backfill data # MAGIC # MAGIC SELECT * FROM historical_bronze_backfill_vw # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's count the records in the backfill data # MAGIC # MAGIC SELECT COUNT(*) FROM historical_bronze_backfill_vw # COMMAND ---------- # MAGIC %md # MAGIC <img src="https://docs.delta.io/latest/_static/delta-lake-logo.png" width=300> # MAGIC # MAGIC ## Let's talk Medallion Architecture and how it can help ensure data quality # MAGIC <img src="https://databricks.com/wp-content/uploads/2021/05/Bronze-Silver-Gold-Tables.png" width=600> # MAGIC # MAGIC # MAGIC # MAGIC MegaCorp has informed us that the Bronze historical data has a few issues. Let's deal with them and create a clean Silver table. # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's create a Silver table. 
We'll start with the Bronze data, then make several improvements # MAGIC # MAGIC DROP TABLE IF EXISTS sensor_readings_historical_silver; # MAGIC # MAGIC CREATE TABLE sensor_readings_historical_silver # MAGIC AS SELECT * FROM historical_bronze_vw; # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's take a peek at our new Silver table # MAGIC # MAGIC SELECT * FROM sensor_readings_historical_silver # MAGIC ORDER BY reading_time ASC # COMMAND ---------- # MAGIC %md # MAGIC #### Let's rectify the bad sensor readings in our data # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's merge in the Bronze backfill data # MAGIC -- MERGE INTO is one of the most important differentiators for Delta Lake # MAGIC -- The entire backfill batch will be treated as an atomic transaction, # MAGIC -- and we can do both inserts and updates within a single batch. # MAGIC # MAGIC MERGE INTO sensor_readings_historical_silver AS h # MAGIC USING historical_bronze_backfill_vw AS b # MAGIC ON # MAGIC h.id = b.id # MAGIC WHEN MATCHED THEN UPDATE SET * # MAGIC WHEN NOT MATCHED THEN INSERT *; # COMMAND ---------- # MAGIC %sql # MAGIC -- Verify that the upserts worked correctly. # MAGIC -- Newly inserted records have dates of 2015-02-21 (and id value beginning with 'ZZZ') # MAGIC -- Updated records have id's in the backfill data that do NOT begin with 'ZZZ'. # MAGIC -- Check a few of these, and make sure that a tiny value was added to reading_1. # MAGIC -- In order to check, you might try something similar to... # MAGIC -- %sql # MAGIC select a.id, a.reading_1 as reading_1_silver, b.reading_1 as reading_1_bronze # MAGIC from sensor_readings_historical_silver a # MAGIC inner join sensor_readings_historical_bronze b # MAGIC on a.id = b.id # MAGIC where a.reading_1 <> b.reading_1 # COMMAND ---------- # MAGIC %sql # MAGIC -- MegaCorp just informed us of some dirty data. Occasionally they would receive garbled data. # MAGIC -- In those cases, they would put 999.99 in the readings. 
# MAGIC -- Let's find these records # MAGIC # MAGIC SELECT * # MAGIC FROM sensor_readings_historical_silver # MAGIC WHERE reading_1 = 999.99 # COMMAND ---------- # MAGIC %sql # MAGIC -- We want to fix these bogus readings. Here's the idea... # MAGIC -- - Use a SQL window function to order the readings by time within each device # MAGIC -- - Whenever there is a 999.99 reading, replace it with the AVERAGE of the PREVIOUS and FOLLOWING readings. # MAGIC -- HINTS: # MAGIC -- Window functions use an "OVER" clause... OVER (PARTITION BY ... ORDER BY ) # MAGIC -- Look up the doc for SQL functions LAG() and LEAD() # MAGIC # MAGIC -- We'll create a table of these interpolated readings, then later we'll merge it into the Silver table. # MAGIC # MAGIC DROP TABLE IF EXISTS sensor_readings_historical_interpolations; # MAGIC # MAGIC CREATE TABLE sensor_readings_historical_interpolations AS ( # MAGIC WITH lags_and_leads AS ( # MAGIC SELECT # MAGIC id, # MAGIC reading_time, # MAGIC device_type, # MAGIC device_id, # MAGIC device_operational_status, # MAGIC reading_1, # MAGIC LAG(reading_1, 1, 0) OVER (PARTITION BY device_id ORDER BY reading_time ASC, id ASC) AS reading_1_lag, # MAGIC LEAD(reading_1, 1, 0) OVER (PARTITION BY device_id ORDER BY reading_time ASC, id ASC) AS reading_1_lead, # MAGIC reading_2, # MAGIC LAG(reading_2, 1, 0) OVER (PARTITION BY device_id ORDER BY reading_time ASC, id ASC) AS reading_2_lag, # MAGIC LEAD(reading_2, 1, 0) OVER (PARTITION BY device_id ORDER BY reading_time ASC, id ASC) AS reading_2_lead, # MAGIC reading_3, # MAGIC LAG(reading_3, 1, 0) OVER (PARTITION BY device_id ORDER BY reading_time ASC, id ASC) AS reading_3_lag, # MAGIC LEAD(reading_3, 1, 0) OVER (PARTITION BY device_id ORDER BY reading_time ASC, id ASC) AS reading_3_lead # MAGIC FROM sensor_readings_historical_silver # MAGIC ) # MAGIC SELECT # MAGIC id, # MAGIC reading_time, # MAGIC device_type, # MAGIC device_id, # MAGIC device_operational_status, # MAGIC ((reading_1_lag + reading_1_lead) / 
2) AS reading_1, # MAGIC ((reading_2_lag + reading_2_lead) / 2) AS reading_2, # MAGIC ((reading_3_lag + reading_3_lead) / 2) AS reading_3 # MAGIC FROM lags_and_leads # MAGIC WHERE reading_1 = 999.99 # MAGIC ORDER BY id ASC # MAGIC ) # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's see how many interpolations we have. There should be 367 rows. # MAGIC # MAGIC SELECT COUNT(*) FROM sensor_readings_historical_interpolations # COMMAND ---------- # MAGIC %sql # MAGIC -- Now use MERGE INTO to update the historical table # MAGIC MERGE INTO sensor_readings_historical_silver AS s # MAGIC USING sensor_readings_historical_interpolations AS i # MAGIC ON # MAGIC s.id = i.id # MAGIC WHEN MATCHED THEN UPDATE SET * # MAGIC WHEN NOT MATCHED THEN INSERT *; # COMMAND ---------- # MAGIC %sql # MAGIC -- Now make sure we got rid of all the bogus readings. # MAGIC -- Gee, this is fast. Why? What feature in Delta Lake is making this so speedy? # MAGIC # MAGIC SELECT count(*) # MAGIC FROM sensor_readings_historical_silver # MAGIC WHERE reading_1 = 999.99 # COMMAND ---------- # MAGIC %md # MAGIC <img src="https://docs.delta.io/latest/_static/delta-lake-logo.png" width=300> # MAGIC ####Time Travel - Go back to the last known stable state of your data # COMMAND ---------- # MAGIC %sql # MAGIC -- List all the versions of the table that are available to us # MAGIC # MAGIC DESCRIBE HISTORY sensor_readings_historical_silver # COMMAND ---------- # MAGIC %sql # MAGIC -- Ah, version 1 should have the 999.99 values # MAGIC # MAGIC SELECT # MAGIC * # MAGIC FROM # MAGIC sensor_readings_historical_silver # MAGIC VERSION # MAGIC AS OF 1 # MAGIC WHERE # MAGIC reading_1 = 999.99; # COMMAND ---------- dbutils.fs.help() # COMMAND ---------- dbutils.fs.head(f"dbfs:/FileStore/flight/{team_name}/assignment_1_ingest.csv") # COMMAND ---------- # MAGIC %md # MAGIC <img src="https://docs.delta.io/latest/_static/delta-lake-logo.png" width=300> # MAGIC ###Handling Schema Evolution # COMMAND ---------- # Read the 
downloaded historical data into a dataframe # This is MegaCorp data regarding power plant device performance. It pre-dates our new IOT effort, but we want to save this data and use it in queries. dataPath = f"dbfs:/FileStore/flight/{team_name}/sensor_new_schema.csv" df = spark.read.option("header","true").option("inferSchema","true").csv(dataPath) display(df) # Create a temporary view on the dataframes to enable SQL df.createOrReplaceTempView("new_schema_bronze_vw") # COMMAND ---------- # MAGIC %sql # MAGIC INSERT INTO sensor_readings_historical_bronze # MAGIC SELECT # MAGIC * # MAGIC FROM # MAGIC new_schema_bronze_vw; # COMMAND ---------- # MAGIC %sql # MAGIC set spark.databricks.delta.schema.autoMerge.enabled=true; # MAGIC # MAGIC INSERT INTO sensor_readings_historical_bronze # MAGIC SELECT # MAGIC * # MAGIC FROM # MAGIC new_schema_bronze_vw; # COMMAND ---------- # MAGIC %sql # MAGIC SELECT # MAGIC * # MAGIC FROM # MAGIC sensor_readings_historical_bronze # MAGIC WHERE # MAGIC reading_4 IS NOT NULL; # COMMAND ---------- # MAGIC %sql # MAGIC -- Here is an example of a Gold table # MAGIC DROP TABLE IF EXISTS sensor_readings_historical_gold_stats; # MAGIC # MAGIC CREATE TABLE sensor_readings_historical_gold_stats AS # MAGIC SELECT # MAGIC device_id # MAGIC , avg(reading_1) as avg_1 # MAGIC , avg(reading_2) as avg_2 # MAGIC , avg(reading_3) as avg_3 # MAGIC , min(reading_1) as min_1 # MAGIC , min(reading_2) as min_2 # MAGIC , min(reading_3) as min_3 # MAGIC , max(reading_1) as max_1 # MAGIC , max(reading_2) as max_2 # MAGIC , max(reading_3) as max_3 # MAGIC FROM # MAGIC sensor_readings_historical_silver # MAGIC GROUP BY # MAGIC device_id # COMMAND ---------- # MAGIC %sql # MAGIC -- Range of sensor readings: minimum, maximum, and the average. 
# MAGIC SELECT # MAGIC device_id # MAGIC , min_1 # MAGIC , max_1 # MAGIC , avg_1 # MAGIC , min_2 # MAGIC , max_2 # MAGIC , avg_2 # MAGIC , min_3 # MAGIC , max_3 # MAGIC , avg_3 # MAGIC FROM # MAGIC sensor_readings_historical_gold_stats # COMMAND ---------- # MAGIC %md # MAGIC <img src="https://databricks.com/wp-content/uploads/2021/08/photon-icon.svg"> # MAGIC ###All that is great! But what about performance?? # MAGIC #### Databricks' Runtime has been consistently benchmarked an order of magnitude faster compared to OSS and other vendors' Spark as well as various other offerings # MAGIC #### Photon takes the performance to the next level # MAGIC # MAGIC Please see the following links for further details. # MAGIC https://databricks.com/product/photon # MAGIC https://databricks.com/blog/2017/07/12/benchmarking-big-data-sql-platforms-in-the-cloud.html # MAGIC https://databricks.com/blog/2021/11/02/databricks-sets-official-data-warehousing-performance-record.html # MAGIC https://pages.databricks.com/Benchmarking-Big-Data-Platforms.html # COMMAND ---------- # MAGIC %md # MAGIC <img src="https://docs.delta.io/latest/_static/delta-lake-logo.png" width=300> # MAGIC ####Delta Lake features for enhanced performance # MAGIC # MAGIC Let's begin with __*partition*__ # COMMAND ---------- # MAGIC %sql # MAGIC -- DESCRIBE EXTENDED will give us some partition information, and will also tell us the location of the data # MAGIC -- Hmmm, looks like we are not partitioned. What does that mean? # MAGIC # MAGIC DESCRIBE EXTENDED sensor_readings_historical_silver # COMMAND ---------- # Let's look at the physical file layout in a non-partitioned table dbutils.fs.ls(f"dbfs:/user/hive/warehouse/{database_name}.db/sensor_readings_historical_silver") # As you can see, the data is just broken into a set of files, without regard to the meaning of the data # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's create a Silver table partitioned by Device. 
# MAGIC -- Create a new table, so we can compare new and old # MAGIC # MAGIC DROP TABLE IF EXISTS sensor_readings_historical_silver_by_device; # MAGIC # MAGIC CREATE TABLE sensor_readings_historical_silver_by_device # MAGIC PARTITIONED BY (device_id) # MAGIC AS SELECT * FROM sensor_readings_historical_silver # COMMAND ---------- # MAGIC %sql # MAGIC -- We can see partition information # MAGIC # MAGIC DESCRIBE EXTENDED sensor_readings_historical_silver_by_device # COMMAND ---------- # Now we have subdirectories for each device, with physical files inside them # Will that speed up queries? dbutils.fs.ls(f"dbfs:/user/hive/warehouse/{database_name}.db/sensor_readings_historical_silver_by_device") # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's create a Silver table partitioned by BOTH Date AND Hour. # MAGIC -- Note that Delta cannot partition by expressions, so I have to explicitly create the partition columns # MAGIC -- HINT: Use the DATE() function to extract date from a timestamp, and use the HOUR() function to extract hour from a timestamp # MAGIC # MAGIC DROP TABLE IF EXISTS sensor_readings_historical_silver_by_hour; # MAGIC # MAGIC CREATE TABLE sensor_readings_historical_silver_by_hour # MAGIC PARTITIONED BY (reading_date, reading_hour) # MAGIC AS SELECT # MAGIC * # MAGIC , DATE(reading_time) as reading_date # MAGIC , HOUR(reading_time) as reading_hour # MAGIC FROM # MAGIC sensor_readings_historical_silver # COMMAND ---------- # NOTE how the hour directories are nested within the date directories dbutils.fs.ls(f"dbfs:/user/hive/warehouse/{database_name}.db/sensor_readings_historical_silver_by_hour/reading_date=2015-02-24") # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's create a Silver table partitioned by Date AND Hour AND Minute. 
# MAGIC -- Note that Delta cannot partition by expressions, so I have to explicitly create the partition columns # MAGIC # MAGIC DROP TABLE IF EXISTS sensor_readings_historical_silver_by_hour_and_minute; # MAGIC # MAGIC CREATE TABLE sensor_readings_historical_silver_by_hour_and_minute # MAGIC PARTITIONED BY (reading_date, reading_hour, reading_minute) # MAGIC AS # MAGIC SELECT # MAGIC * # MAGIC , DATE(reading_time) as reading_date # MAGIC , HOUR(reading_time) as reading_hour # MAGIC , MINUTE(reading_time) as reading_minute # MAGIC FROM # MAGIC sensor_readings_historical_silver # COMMAND ---------- # MAGIC %sql # MAGIC -- Let's take a peek at our minute-partitioned table # MAGIC # MAGIC SELECT # MAGIC * # MAGIC FROM # MAGIC sensor_readings_historical_silver_by_hour_and_minute # MAGIC LIMIT 100 # COMMAND ---------- # MAGIC %sql # MAGIC -- Now let's take some timings that compare our partitioned Silver tables against the unpartitioned Silver table # MAGIC -- Here is an example "baseline" query against the unpartitioned Silver table # MAGIC -- (run these queries several times to get a rough average) # MAGIC # MAGIC SELECT # MAGIC * # MAGIC FROM # MAGIC sensor_readings_historical_silver # MAGIC WHERE # MAGIC DATE(reading_time) = '2015-02-24' # MAGIC AND HOUR(reading_time) = '14' # MAGIC AND MINUTE(reading_time) = '2' # COMMAND ---------- # MAGIC %sql # MAGIC -- Now compare the time for the same query against a partitioned table # MAGIC -- Think and discuss... Did both data skipping and partitioning play a part here? How could you combine data skipping and partitioning to make queries even more performant? 
# MAGIC # MAGIC SELECT # MAGIC * # MAGIC FROM # MAGIC sensor_readings_historical_silver_by_hour_and_minute # MAGIC WHERE # MAGIC reading_date = '2015-02-24' # MAGIC AND reading_hour = '14' # MAGIC AND reading_minute = '2' # COMMAND ---------- # MAGIC %md # MAGIC # MAGIC <img src="https://docs.delta.io/latest/_static/delta-lake-logo.png" width=300> # MAGIC ####Delta Lake Caching # COMMAND ---------- # MAGIC %sql # MAGIC CACHE SELECT * FROM sensor_readings_historical_silver # COMMAND ---------- # MAGIC %md # MAGIC #Delta Live Tables # MAGIC ##Reliable data engineering made easy # MAGIC # MAGIC Delta Live Tables (DLT) makes it easy to build and manage reliable data pipelines that deliver high quality data on Delta Lake. DLT helps data engineering teams simplify ETL development and management with declarative pipeline development, automatic data testing, and deep visibility for monitoring and recovery. # MAGIC # MAGIC Here's what Delta Live Tables do for you. # MAGIC # MAGIC ###More easily build and maintain data pipelines</li> # MAGIC <img src="https://databricks.com/wp-content/uploads/2021/09/Live-Tables-Pipeline.png"> # MAGIC # MAGIC --- # MAGIC ###Automatic Testing # MAGIC <img src="https://databricks.com/wp-content/uploads/2021/05/Bronze-Silver-Gold-Tables.png"> # MAGIC # MAGIC --- # MAGIC ###Deep visibility for monitoring and easy recovery # MAGIC <img src="https://databricks.com/wp-content/uploads/2021/05/Pipeline-Graph.png"> # COMMAND ---------- # MAGIC %md # MAGIC # MAGIC <img src="https://docs.delta.io/latest/_static/delta-lake-logo.png" width=300> # MAGIC Read more about Delta Lake - https://delta.io/ # MAGIC Read more about Delta Live Tables - https://databricks.com/product/delta-live-tables # MAGIC Read more about Caching - https://docs.databricks.com/delta/optimizations/delta-cache.html # MAGIC Read more about ZOrdering - https://docs.databricks.com/delta/optimizations/file-mgmt.html # COMMAND ---------- # MAGIC %sql # MAGIC drop table if exists dummy1; # 
MAGIC create table dummy1 as select * from sensor_readings_historical_bronze limit 5; # COMMAND ---------- # MAGIC %sql # MAGIC describe history dummy1 # COMMAND ---------- dbutils.fs.ls(f"dbfs:/user/hive/warehouse/dummy1") # COMMAND ---------- # MAGIC %sql # MAGIC select * from dummy1 version as of 17 # COMMAND ---------- # MAGIC %sql # MAGIC DESCRIBE HISTORY DUMMY1 # COMMAND ---------- # MAGIC %sql # MAGIC alter table dummy1 set tblproperties(delta.logRetentionDuration="interval 0 hours") # COMMAND ---------- # MAGIC %sql # MAGIC show tblproperties dummy1 # COMMAND ---------- # MAGIC %sql # MAGIC set spark.databricks.delta.retentionDurationCheck.enabled = false; # MAGIC # MAGIC VACUUM dummy1 RETAIN 0 HOURS # COMMAND ---------- # MAGIC %sql # MAGIC update dummy1 set reading_1=126 where id="34fb2d8a-5829-4036-adea-a08ccc2c260c" # COMMAND ---------- # MAGIC %sql # MAGIC select * from dummy1 # COMMAND ---------- # MAGIC %sql # MAGIC optimize dummy1 # COMMAND ---------- dbutils.fs.ls(f"dbfs:/user/hive/warehouse//dummy1") # COMMAND ---------- dbutils.fs.ls(f"dbfs:/user/hive/warehouse/dummy1/_delta_log") # COMMAND ---------- df = spark.read.json("dbfs:/user/hive/warehouse/dummy1/_delta_log/00000000000000000010.json") # COMMAND ---------- # MAGIC %sql # MAGIC set spark.databricks.delta.formatCheck.enabled=false # COMMAND ---------- # MAGIC %scala # MAGIC # MAGIC val df = spark.read.parquet("dbfs:/user/hive/warehouse/dummy1/part-00000-7f2a6d7c-1d44-47e1-a37c-da877c15e860-c000.snappy.parquet") # MAGIC display(df) # COMMAND ---------- # MAGIC %scala # MAGIC # MAGIC val df = spark.read.parquet("dbfs:/user/hive/warehouse/dummy1/_delta_log/00000000000000000020.checkpoint.parquet") # MAGIC display(df) # COMMAND ---------- df = spark.read.json("dbfs:/user/hive/warehouse//dummy1/_delta_log/00000000000000000028.json") display(df) ##Out[59]: [FileInfo(path='dbfs:/user/hive/warehouse/dummy1/_delta_log/', name='_delta_log/', size=0), ## 
FileInfo(path='dbfs:/user/hive/warehouse/dummy1/part-00000-7d18212c-7240-4194-94d2-a34f56195076-c000.snappy.parquet', name='part-00000-7d18212c-7240-4194-94d2-a34f56195076-c000.snappy.parquet', size=3292), ## FileInfo(path='dbfs:/user/hive/warehouse/dummy1/part-00000-7f2a6d7c-1d44-47e1-a37c-da877c15e860-c000.snappy.parquet', name='part-00000-7f2a6d7c-1d44-47e1-a37c-da877c15e860-c000.snappy.parquet', size=3293), ## FileInfo(path='dbfs:/user/hive/warehouse/dummy1/part-00000-940cde1d-5373-4986-aa73-cd099ae021d7-c000.snappy.parquet', name='part-00000-940cde1d-5373-4986-aa73-cd099ae021d7-c000.snappy.parquet', size=3293)] # COMMAND ---------- dbutils.fs.rm(f"dbfs:/user/hive/warehouse//dummy1/_delta_log/00000000000000000000.json") # COMMAND ---------- # MAGIC %sql # MAGIC describe formatted dummy1 # COMMAND ---------- # MAGIC %sql # MAGIC VACUUM dummy1 retain 0 hours # COMMAND ----------
# binary search
def binary(srchlist, srch):
    """Binary-search `srchlist` for `srch`.

    `srchlist` must be sorted in ascending order.  Returns the index of
    `srch` if present, otherwise -1.
    """
    first = 0
    last = len(srchlist) - 1
    while first <= last:
        # Integer midpoint.  The original used '/', which yields a float in
        # Python 3 and raises TypeError when used as a list index.
        mid = (first + last) // 2
        if srch > srchlist[mid]:
            first = mid + 1
        elif srch < srchlist[mid]:
            last = mid - 1
        else:
            return mid
    return -1
#Longest Collatz Sequence
#Solving for Project Euler.Net Problem 14.
#Given n -> n/2   (n is even)
#      n -> 3n+1  (n is odd)
#
#Which starting number, under one million, produces the longest chain?
#
#By Alex Murshak


def collatz(n):
    """Return the number of Collatz steps needed to reduce `n` to 1.

    (collatz(1) == 0; the starting number itself is not counted.)
    """
    count = 0
    while n > 1:
        if n % 2 == 0:
            # Floor division keeps n an int; the original '/' produced
            # floats, which is slower and loses precision for large n.
            n //= 2
        else:
            n = 3 * n + 1
        count += 1
    return count


if __name__ == '__main__':
    # Guarded so the million-number scan no longer runs on import.
    C_large = 0
    I_large = 0
    for i in range(1, 1000000, 1):
        C = collatz(i)
        if C > C_large:
            C_large = C
            I_large = i
    print(I_large)
class Solution:
    def romanToInt(self, s: str) -> int:
        """Convert the Roman numeral `s` to its integer value."""
        # Rewrite each subtractive pair (IV, IX, XL, XC, CD, CM) as an
        # equivalent purely-additive run, in the same order the original
        # applied its replacements, so the string can simply be summed.
        for pair, expansion in (
            ("IV", "IIII"), ("IX", "VIIII"),
            ("XL", "XXXX"), ("XC", "LXXXX"),
            ("CD", "CCCC"), ("CM", "DCCCC"),
        ):
            s = s.replace(pair, expansion)
        symbol_values = {
            "I": 1, "V": 5, "X": 10, "L": 50,
            "C": 100, "D": 500, "M": 1000,
        }
        return sum(symbol_values[symbol] for symbol in s)
# SoftUni-style "Easter bread" exercise: given a budget and the price of
# 1 kg of flour, compute how many loaves can be baked, how many colored
# eggs remain, and how much money is left over.

budget = float(input())
price_1kg_flour = float(input())

colored_eggs = 0

# Derived ingredient prices: a pack of eggs costs 75% of the flour price;
# 250 ml of milk costs a quarter of (flour price + 25%).
price_1pack_eggs = price_1kg_flour * 0.75
price_250ml_milk = (price_1kg_flour + (price_1kg_flour * 0.25)) / 4

# One loaf consumes one pack of eggs, 1 kg of flour and 250 ml of milk.
price_1_bread = price_1pack_eggs + price_1kg_flour + price_250ml_milk

# Whole loaves only — floor-divide the budget by the per-loaf cost.
count_breads = int(budget // price_1_bread)

for current_bread in range(1, count_breads + 1):
    # Three eggs are colored with every loaf baked.
    colored_eggs += 3
    # With every third loaf, (loaf number - 2) of the colored eggs are lost.
    if current_bread % 3 == 0:
        lost_colored_eggs = current_bread - 2
        colored_eggs -= lost_colored_eggs

money_left = budget - (count_breads * price_1_bread)

print(f"You made {count_breads} loaves of Easter bread! Now you have {colored_eggs} eggs and {money_left:.2f}BGN left.")
def min_value(gameState):
    """Minimax MIN node.

    Return the terminal utility (always evaluated for player 1, i.e.
    `utility(0)`) if the game is over; otherwise the minimum value over
    all legal successor states.
    """
    if gameState.terminal_test():
        return gameState.utility(0)
    # `default` reproduces the original fold identity (+inf) when the
    # action list is empty.
    return min(
        (max_value(gameState.result(action)) for action in gameState.actions()),
        default=float("inf"),
    )


def max_value(gameState):
    """Minimax MAX node.

    Return the terminal utility (always evaluated for player 1, i.e.
    `utility(0)`) if the game is over; otherwise the maximum value over
    all legal successor states.
    """
    if gameState.terminal_test():
        return gameState.utility(0)
    # `default` reproduces the original fold identity (-inf) when the
    # action list is empty.
    return max(
        (min_value(gameState.result(action)) for action in gameState.actions()),
        default=float("-inf"),
    )
#!/usr/bin/env python3

"""
Exercise 20: Word Count

Mimic the Un*x "wc" command to count lines, words, and characters.
"""


def _scan(filename):
    # Single pass over the file accumulating all four statistics.
    lines = 0
    words = 0
    characters = 0
    distinct_words = set()
    with open(filename) as f:
        for line in f:
            fields = line.split()
            lines += 1
            words += len(fields)
            characters += len(line)  # includes the trailing newline, like wc
            distinct_words.update(fields)
    return lines, words, characters, distinct_words


def wc(filename):
    """Print character/word/distinct-word/line counts for *filename*.

    Output format matches the original: one "Label: N." line per statistic.
    """
    lines, words, characters, distinct_words = _scan(filename)
    print(f'Characters: {characters}.')
    print(f'Words: {words}.')
    print(f'Distinct Words: {len(distinct_words)}.')
    print(f'Lines: {lines}.')


if __name__ == '__main__':
    wc('wcfile.txt')
    print()
    wc('rev_eli.txt')
# O(n) time | O(h)-proportional space - where n is the number of nodes in the
# Binary Tree and h is the height of the Binary Tree (the explicit stack
# replaces the call stack of the recursive formulation).
def nodeDepths(root, depth = 0):
    """Return the sum of every node's depth, counting `root` as `depth`."""
    total = 0
    pending = [(root, depth)]
    while pending:
        node, node_depth = pending.pop()
        if node is None:
            continue
        total += node_depth
        pending.append((node.left, node_depth + 1))
        pending.append((node.right, node_depth + 1))
    return total


# This is the class of the input binary tree.
class BinaryTree:
    """Plain binary-tree node: a value plus optional left/right children."""

    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None
# Auto-generated IronPython stub describing the .NET WPF type
# System.Windows.Media.Animation.KeySpline.  Every method body is `pass`:
# the real implementation lives in the CLR assembly; this file only
# documents the available surface for tooling.  Do not add logic here.
class KeySpline(Freezable,ISealable,IFormattable):
    """
    Defines animation progress for a spline key frame via a Bezier curve.

    Constructors:
    KeySpline(controlPoint1: Point,controlPoint2: Point)
    KeySpline()
    KeySpline(x1: float,y1: float,x2: float,y2: float)
    """
    def CloneCore(self,*args):
        """
        CloneCore(self: KeySpline,sourceFreezable: Freezable)

        Makes this instance a deep copy of sourceFreezable.  Resource
        references and data bindings are copied (they might no longer
        resolve); animations and their current values are not.
        """
        pass
    def CloneCurrentValueCore(self,*args):
        """
        CloneCurrentValueCore(self: KeySpline,sourceFreezable: Freezable)

        Makes this instance a modifiable deep copy of sourceFreezable using
        current property values.  Resource references, data bindings and
        animations are not copied, but their current values are.
        """
        pass
    def CreateInstance(self,*args):
        """
        CreateInstance(self: Freezable) -> Freezable

        Initializes and returns a new instance of the Freezable class.
        """
        pass
    def CreateInstanceCore(self,*args):
        """
        CreateInstanceCore(self: KeySpline) -> Freezable

        Creates and returns a new KeySpline instance.
        """
        pass
    def FreezeCore(self,*args):
        """
        FreezeCore(self: Freezable,isChecking: bool) -> bool

        Makes the Freezable unmodifiable, or tests whether it can be made
        unmodifiable.  With isChecking=true, only reports whether freezing
        is possible; with false, actually freezes and reports success.
        """
        pass
    def GetAsFrozenCore(self,*args):
        """
        GetAsFrozenCore(self: KeySpline,sourceFreezable: Freezable)

        Makes this instance a clone of the specified KeySpline object.
        """
        pass
    def GetCurrentValueAsFrozenCore(self,*args):
        """
        GetCurrentValueAsFrozenCore(self: KeySpline,sourceFreezable: Freezable)

        Makes this instance a frozen clone of sourceFreezable.  Resource
        references, data bindings and animations are not copied, but their
        current values are.
        """
        pass
    def GetSplineProgress(self,linearProgress):
        """
        GetSplineProgress(self: KeySpline,linearProgress: float) -> float

        Calculates and returns the spline progress corresponding to the
        supplied linear progress.
        """
        pass
    def OnChanged(self,*args):
        """
        OnChanged(self: KeySpline)

        Called when the current KeySpline object is modified.
        """
        pass
    def OnFreezablePropertyChanged(self,*args):
        """
        OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)

        WPF infrastructure; not intended to be called directly.

        OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)

        Ensures appropriate context pointers are established for a
        DependencyObjectType data member that has just been set.
        """
        pass
    def OnPropertyChanged(self,*args):
        """
        OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)

        Overrides DependencyObject.OnPropertyChanged to also invoke any
        Freezable.Changed handlers when a Freezable-typed dependency
        property changes.
        """
        pass
    def ReadPreamble(self,*args):
        """
        ReadPreamble(self: Freezable)

        Ensures the Freezable is being accessed from a valid thread.
        Inheritors must call this at the start of any API that reads data
        members that are not dependency properties.
        """
        pass
    def ShouldSerializeProperty(self,*args):
        """
        ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool

        Returns true if serialization processes should serialize the value
        of the supplied dependency property.
        """
        pass
    def ToString(self,formatProvider=None):
        """
        ToString(self: KeySpline,formatProvider: IFormatProvider) -> str

        String representation using the supplied format provider (current
        culture if provider is null).

        ToString(self: KeySpline) -> str

        String representation based on the current culture.
        """
        pass
    def WritePostscript(self,*args):
        """
        WritePostscript(self: Freezable)

        Raises the Freezable.Changed event and invokes OnChanged.  Derived
        classes should call this at the end of any API that modifies class
        members not stored as dependency properties.
        """
        pass
    def WritePreamble(self,*args):
        """
        WritePreamble(self: Freezable)

        Verifies the Freezable is not frozen and is being accessed from a
        valid threading context.  Inheritors should call this at the start
        of any API that writes data members that are not dependency
        properties.
        """
        pass
    def __format__(self,*args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod
    def __new__(self,*__args):
        """
        __new__(cls: type)
        __new__(cls: type,x1: float,y1: float,x2: float,y2: float)
        __new__(cls: type,controlPoint1: Point,controlPoint2: Point)
        """
        pass
    def __str__(self,*args):
        pass
    ControlPoint1=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """The first control point of the Bezier curve that defines this KeySpline.

    Get: ControlPoint1(self: KeySpline) -> Point
    Set: ControlPoint1(self: KeySpline)=value
    """

    ControlPoint2=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """The second control point of the Bezier curve that defines this KeySpline.

    Get: ControlPoint2(self: KeySpline) -> Point
    Set: ControlPoint2(self: KeySpline)=value
    """
# Lesson 1 - Hello world and entry points
#
# Every language defines an "entry point" -- the place where execution
# begins.  C-family languages spell it `main`:
#
#   int main() { return 0; }   or   void main() { }
#
# Python has no `main` keyword; instead, the module that is run directly
# gets __name__ == '__main__', which is what the guard below checks.
#
# "Hello World" is the traditional minimal program for exercising basic
# console output in a new language.


def helloWorld():
    """Print the classic greeting used to smoke-test console output."""
    greeting = "Hello World"
    print(greeting)


if __name__ == '__main__':
    helloWorld()
# Demo module: function decorators, *args/**kwargs handling, and applying
# decorators to methods.  Only `print(a('johan'))` runs in the demo block;
# the other calls are left commented out as alternative experiments.

PASSWD = '12345'

# Decorator: prompt for a password on every call and only run `func` when
# the answer matches PASSWD.  NOTE(review): the wrapper takes no arguments,
# so this can only wrap zero-argument functions.
def password_required(func):
    def wrapper():
        password = input('Cual es el passwd ? ')
        if password == PASSWD:
            return func()
        else:
            print('error')
    return wrapper

# Decorator: wrap the decorated function's string result in <p> tags.
def p_decorate(func):
    def func_wrapper(name):
        return "<p>{0}</p>".format(func(name))
    return func_wrapper

# Decorator: wrap the result in <strong> tags.
def strong_decorate(func):
    def func_wrapper(name):
        return "<strong>{0}</strong>".format(func(name))
    return func_wrapper

# Decorator: wrap the result in <div> tags.
def div_decorate(func):
    def func_wrapper(name):
        return "<div>{0}</div>".format(func(name))
    return func_wrapper

# Decorator: uppercase the result; forwards any positional and keyword
# arguments to the wrapped function.
def upper_dec(func):
    def wrapper(*args,**kwargs):
        result = func(*args,**kwargs)
        return result.upper()
    return wrapper

@upper_dec
def get_my_name(name):
    return "My name is {0}".format(name)

# Stacked decorators apply bottom-up: strong first, then p, then div.
@div_decorate
@p_decorate
@strong_decorate
def get_text(name):
    return "lorem ipsum, {0} dolor sit amet".format(name)

# Alias so the demo below can call the decorated function by another name.
a = get_text

@password_required
def needs_password():
    print('la contra esta correcta!!')

########################################################
## test general arguments "*args,**kwargs"

# Print each keyword argument as "key == value", then the kwargs type
# (always a dict; the `is not None` check is redundant but kept as-is).
def test_valor_kwargs(**kwargs):
    if kwargs is not None:
        for key, value in kwargs.items():
            print('%s == %s' %(key,value))
        print(type(kwargs))

# Show one ordinary positional parameter followed by a *args tuple.
def test_valor_args(n_arg, *args):
    print('primer valor normal: ', n_arg)
    for arg in args:
        print('este es un valor de *args: ',arg)
    print(type(args))

# Show *args and **kwargs received together.
def test_valor_kwargs_args(*args, **kwargs):
    print(type(kwargs))
    print(kwargs)
    print('----------')
    print(type(args))
    print(args)

################################################################################
### example decorators in classes

# Same <p>-wrapping idea, but argument-agnostic so it also works on methods
# (the instance arrives through *args as `self`).
def p_decorate_cla(func):
    def func_wrapper(*args, **kwargs):
        return "<p>{0}</p>".format(func(*args, **kwargs))
    return func_wrapper

class Person():
    def __init__(self):
        self.name = "John"
        self.family = "Doe"

    @p_decorate_cla
    def get_fullname(self):
        return self.name+" "+self.family

if __name__ == '__main__':
    #print(get_my_name('johan'))
    print(a('johan'))
    #test_valor_kwargs(caricatura='batman')
    #test_valor_args('carlos','Karla','Paola','Elena',[1,2,3,5,1])
    #test_valor_kwargs_args('flash', 'batman',[1,2,3,5,1], caricatura='batman', empresa = 'dc')
    #
    #my_person = Person()
    #print(my_person.get_fullname())
    #needs_password()
    #print(get_text('johan'))
# In the first line, we are calling the print() function to display # an informational message. It is the same as printing like # we previously did in the hello world file print('Interest Calculator:') # These next three lines, we're using the following variables to store # the input provided by the user. The variable 'amount' represents # the principal amount borrowed. amount = float(input('Principal amount ?')) # 'roi' represents the rate of interest levied on the principal amount. # float(value) -> It converts a value to a float type number. roi = float(input('Rate of Interest ?')) # 'years' represents the number of years of the borrowing period # int(value) -> It converts any value to a plain integer. years = int(input('Duration (no. of years) ?')) # Using the variable 'total', we can store the result of the complex assignment # The total -> It represents the total amount to be paid after the borrowing period. total = (amount * pow(1 + (roi/100), years)) # Further to add, this assignment involves the use of the following Python’s arithmetic operators and functions. # + Addition -> It adds numbers on either side of the operator. # * Multiplication -> It multiplies numbers on either side of the operator. # / Division -> It divides left-hand operand by right-hand operand. # pow(X, Y, Z) -> It determines [X to the power Y]. If Z is available, then it’ll return X to the power Y, modulo Z. # Use Python's subtraction operator to calculate the interest amount interest = total - amount # Finally, there is a print statement displaying the interest amount. Since it is a # float value, so the print() function will show the full number by default. # Hence, we are using the floating point format specifier “%0.2f” in print() function # so that we can limit the printing up to two decimal points. print('\nInterest = %0.2f' %interest) # To see the output in Terminal, right click the file and select 'Run Python File in Terminal'
def euclid(n, m):
    """Return the greatest common divisor of `n` and `m`.

    Uses Euclid's algorithm via the remainder operation.  Unlike the
    original (which raised ZeroDivisionError when an argument was 0 and
    was undefined for negatives), this handles zero and negative inputs:
    euclid(x, 0) == abs(x), and the result is always non-negative.
    """
    # Work with magnitudes so negative inputs behave like their absolutes.
    n, m = abs(n), abs(m)
    # Classic remainder loop; terminates because m strictly decreases.
    while m:
        n, m = m, n % m
    return n
"""Loads the gflags library"""

# Sanitize a dependency so that it works correctly from code that includes
# Apollo as a submodule.  Round-tripping through Label() makes the target
# absolute with respect to the repository that defines it.
def clean_dep(dep):
    return str(Label(dep))

# Registers the gflags external repository.  The library is expected to be
# pre-installed on the build machine at /usr/local/include/gflags and is
# wrapped with the checked-in BUILD file.
def repo():
    # gflags
    native.new_local_repository(
        name = "com_github_gflags_gflags",
        build_file = clean_dep("//third_party/gflags:gflags.BUILD"),
        path = "/usr/local/include/gflags",
    )

    # Kept for reference: how to fetch gflags from source with http_archive
    # instead of relying on the locally installed copy.
    #http_archive(
    #    name = "com_github_gflags_gflags",
    #    build_file = "gflags.BUILD",
    #    sha256 = "34af2f15cf7367513b352bdcd2493ab14ce43692d2dcd9dfc499492966c64dcf",
    #    strip_prefix = "gflags-2.2.2",
    #    urls = ["https://github.com/gflags/gflags/archive/v2.2.2.tar.gz"],
    #)
def bisection_search(arr: list, n: int) -> bool:
    """Return True if `n` occurs in the ascending-sorted list `arr`.

    Recursive binary search.  The original crashed with IndexError on an
    empty list; that case now returns False.
    """
    if not arr:
        # Guard: arr[mid] on an empty list raised IndexError before.
        return False
    mid = len(arr) // 2
    if arr[mid] == n:
        return True
    if len(arr) == 1:
        return False
    if arr[mid] > n:
        return bisection_search(arr[:mid], n)
    # Exclude the already-checked midpoint from the right half.
    return bisection_search(arr[mid + 1:], n)


print(bisection_search([1, 2, 3, 4, 5, 6, 7, 8, 9, 11], 12))
def extractNetorutranslationsWordpressCom(item):
    '''
    Parser for 'netorutranslations.wordpress.com'

    Maps a scraped release post to a release message. Returns None when
    the title carries no usable numbering (or is a preview), the built
    release message when a known series tag matches, and False when no
    tag matches.
    '''
    # Pull volume/chapter/fragment numbering out of the post title.
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip posts with no numbering at all, and preview posts.
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    # (tag substring, canonical series name, translation type) triples.
    # The first entry whose tag appears in the item's tags wins.
    tagmap = [
        ('pilgrimage', 'Netorare Pilgrimage of the Saint', 'translated'),
        ('production', 'Perfect & Virtuous Girlfriend’s NTR Production', 'translated'),
        ('Alicia', 'NTR Fantasy – The Empire’s Saint Alicia –', 'translated'),
        ('Pleasant', 'Omae no neechan no ma〇Ko kimochiyo sugi.', 'translated'),
        ('prostitution', 'Childhood friend and student council who traveled to a prostitution harem RPG! -Heroines who sell their bodies and become bitches while loving me-', 'translated'),
        ('obscene', 'For whom did she become obscene', 'translated'),
        ('overthrow', 'It seems that innocent prayers and funny friends will overthrow the thousand-years-long reign of the Demon Lord', 'translated'),
        ('corrupted', 'My girlfriend is corrupted by a world whose common sense has been twisted', 'translated'),
        ('slaying', 'The Demon-Slaying Girl', 'translated'),
        ('Sofia', 'Saintess Academy Sofia – Academy story of a JC that is earnestly going through erotic fights –', 'translated'),
        ('mining', 'If I mined the whole life at VRMMO, it may have transited to a similar world that is similar [NTR]', 'translated'),
        ('Sword', 'Sexual swordsmanship dojo ~Naughty practice with beloved disciples with crotch sword~', 'translated'),
        ('actress', 'My girlfriend is an AV actress', 'translated'),
        ('dualism', 'Dualism ~She is embraced by a male friend while loving her boyfriend~', 'translated'),
        ('propensity', 'The girls who cooperate with my propensity for netorare', 'translated'),
        ('lend', 'Today, I lent her out as a thing', 'translated'),
        ('dedicated', 'Dedicated to my beloved and bewitching white-haired loli', 'translated'),
        ('666', 'My childhood friend who was engraved with an indecent crest in the magic book I found in the storehouse, and the almost 666 men who held her', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]

    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)

    # No known series tag matched this post.
    return False
""" File: student_info_dict.py ------------------------------ This program puts data in a text file into a nested data structure where key is the name of each student, and the value is the dict that stores the student info """ # The file name of our target text file FILE = 'romeojuliet.txt' # Contains the chars we would like to ignore while processing the words PUNCTUATION = '.,;!?#&-\'_+=/\\"@$^%()[]{}~' def main(): d = {} with open(FILE, 'r') as f: for line in f: token_list = line.split() for token in token_list: token = string_manipulation(token) # 不知道存不存在怎麼加啦! key error~ # d[token] += 1 # key為該文字,value為該文字出現之次數 if token in d: d[token] += 1 else: # 注意初始值為1,非0,因為當你看到它時是它出現的第一次!! d[token] = 1 print_out_d(d) def print_out_d(d): """ : param d: (dict) key of type str is a word value of type int is the word occurrence --------------------------------------------------------------- This method prints out all the info in d """ for key, value in sorted(d.items(), key=lambda t: t[1]): print(key, '->', value) def string_manipulation(word): word = word.lower() ans = '' for ch in word: if ch.isalpha() or ch.isdigit(): # if ch not in PUNTUATION: ans += ch return ans if __name__ == '__main__': main()
# Iterator protocol: __iter__ and __next__.
# Every Python iteration context tries __iter__ first (via the built-in
# iter()), falling back to __getitem__ only when __iter__ is missing.
# __iter__ must return an iterator whose __next__ produces values until
# it raises StopIteration.


class Squares:
    """Iterator yielding the squares of start..stop (inclusive).

    __iter__ returns self, so an instance is a one-shot iterator: once
    exhausted it stays exhausted (the second loop below prints nothing).
    """

    def __init__(self, start, stop):
        # Keep the value one step behind so __next__ can pre-increment.
        self.value = start - 1
        self.stop = stop

    def __iter__(self):
        return self

    def __next__(self):
        if self.value == self.stop:
            raise StopIteration
        self.value += 1
        # Bug fix: the class generates squares, but the original returned
        # self.value * 2 (doubles) instead of the square.
        return self.value ** 2


if __name__ == "__main__":
    X = Squares(1, 5)
    for i in X:
        print(i, end=" ")
    # X is already exhausted here, so the comprehension yields [].
    print([i for i in X])
# Reads three side lengths and reports whether they satisfy the triangle
# inequality (each side shorter than the sum of the other two).
l1 = int(input('Digite o lado 1 '))
l2 = int(input('Digite o lado 2 '))
l3 = int(input('Digite o lado 3 '))

forma_triangulo = l1 + l2 > l3 and l1 + l3 > l2 and l2 + l3 > l1
if forma_triangulo:
    print(f'O triangulo PODE ser formado')
else:
    print(f'O triangulo NAO PODE ser formado')
''' 02 - Creating two-factor Let's continue looking at the student_data dataset of students in secondary school. Here, we want to answer the following question: does a student's first semester grade ("G1") tend to correlate with their final grade ("G3")? There are many aspects of a student's life that could result in a higher or lower final grade in the class. For example, some students receive extra educational support from their school ("schoolsup") or from their family ("famsup"), which could result in higher grades. Let's try to control for these two factors by creating subplots based on whether the student received extra educational support from their school or family. Seaborn has been imported as sns and matplotlib.pyplot has been imported as plt. Instructions 1/3 - Use relplot() to create a scatter plot with "G1" on the x-axis and "G3" on the y-axis, using the student_data DataFrame. ''' # Create a scatter plot of G1 vs. G3 sns.relplot(x="G1", y="G3", data=student_data, kind="scatter", ) # Show plot plt.show() ''' Instructions 2/3 - Create column subplots based on whether the student received support from the school ("schoolsup"), ordered so that "yes" comes before "no". ''' # Adjust to add subplots based on school support sns.relplot(x="G1", y="G3", data=student_data, kind="scatter", col="schoolsup", col_order=["yes", "no"]) # Show plot plt.show() ''' Instructions 3/3 - Add row subplots based on whether the student received support from the family ("famsup"), ordered so that "yes" comes before "no". This will result in subplots based on two factors. ''' # Adjust further to add subplots based on family support sns.relplot(x="G1", y="G3", data=student_data, kind="scatter", col="schoolsup", col_order=["yes", "no"], row="famsup", row_order=["yes", "no"]) # Show plot plt.show()
def f(i):
    return i + 2


def g(i):
    return i > 1000


def applyF_filterG(L, f, g):
    """
    Assumes L is a list of integers
    Assume functions f and g are defined for you.
    f takes in an integer, applies a function, returns another integer
    g takes in an integer, applies a Boolean function, returns either
    True or False
    Mutates L such that, for each element i originally in L, L contains
    i if g(f(i)) returns True, and no other elements
    Returns the largest element in the mutated L or -1 if the list is empty
    """
    # Filter into a fresh list first, then splice it back so the caller's
    # list object is mutated in place.
    survivors = [item for item in L if g(f(item))]
    L[:] = survivors
    if not L:
        return -1
    print(L)
    return max(L)


L = [55550, 2500, 545645, 66554, 6000]
print(applyF_filterG(L, f, g))
#
# PySNMP MIB module CISCO-VOICE-DNIS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-VOICE-DNIS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:03:03 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): auto-generated by pysmi -- regenerate from the ASN.1 source
# rather than editing by hand. `mibBuilder` is injected by the pysnmp MIB
# loading machinery when this module is executed.

# Base ASN.1 / SMI / textual-convention symbols resolved via the builder.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
iso, NotificationType, Integer32, Counter64, Gauge32, Unsigned32, MibIdentifier, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, TimeTicks, ModuleIdentity, Bits, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "NotificationType", "Integer32", "Counter64", "Gauge32", "Unsigned32", "MibIdentifier", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "TimeTicks", "ModuleIdentity", "Bits", "IpAddress")
DisplayString, TextualConvention, RowStatus, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "RowStatus", "TruthValue")

# Module identity: 1.3.6.1.4.1.9.9.219 (Cisco voice DNIS MIB).
ciscoVoiceDnisMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 219))
if mibBuilder.loadTexts: ciscoVoiceDnisMIB.setLastUpdated('200205010000Z')
if mibBuilder.loadTexts: ciscoVoiceDnisMIB.setOrganization('Cisco Systems, Inc.')

# Textual conventions: bounded octet-string names/numbers.
class DnisMapname(TextualConvention, OctetString):
    status = 'current'
    displayHint = '32a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 32)

class CvE164String(TextualConvention, OctetString):
    status = 'current'
    displayHint = '32a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(1, 32)

cvDnisMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 1))
cvDnisMap = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1))

# DNIS mapping table: one row per named mapping (indexed by mapping name).
cvDnisMappingTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1), )
if mibBuilder.loadTexts: cvDnisMappingTable.setStatus('current')
cvDnisMappingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1), ).setIndexNames((1, "CISCO-VOICE-DNIS-MIB", "cvDnisMappingName"))
if mibBuilder.loadTexts: cvDnisMappingEntry.setStatus('current')
cvDnisMappingName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 1), DnisMapname().subtype(subtypeSpec=ValueSizeConstraint(1, 32)))
if mibBuilder.loadTexts: cvDnisMappingName.setStatus('current')
cvDnisMappingUrl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 2), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvDnisMappingUrl.setStatus('current')
cvDnisMappingRefresh = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("idle", 1), ("refresh", 2))).clone('idle')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvDnisMappingRefresh.setStatus('current')
cvDnisMappingUrlAccessError = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvDnisMappingUrlAccessError.setStatus('current')
cvDnisMappingStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvDnisMappingStatus.setStatus('current')

# DNIS node table: per-number entries within a mapping (name + E.164 number).
cvDnisNodeTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2), )
if mibBuilder.loadTexts: cvDnisNodeTable.setStatus('current')
cvDnisNodeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1), ).setIndexNames((0, "CISCO-VOICE-DNIS-MIB", "cvDnisMappingName"), (1, "CISCO-VOICE-DNIS-MIB", "cvDnisNumber"))
if mibBuilder.loadTexts: cvDnisNodeEntry.setStatus('current')
cvDnisNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1, 1), CvE164String())
if mibBuilder.loadTexts: cvDnisNumber.setStatus('current')
cvDnisNodeUrl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1, 2), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvDnisNodeUrl.setStatus('current')
cvDnisNodeModifiable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvDnisNodeModifiable.setStatus('current')
cvDnisNodeStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvDnisNodeStatus.setStatus('current')

# Notifications.
cvDnisMIBNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 2))
cvDnisMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 2, 0))
cvDnisMappingUrlInaccessible = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 219, 2, 0, 1)).setObjects(("CISCO-VOICE-DNIS-MIB", "cvDnisMappingUrl"), ("CISCO-VOICE-DNIS-MIB", "cvDnisMappingUrlAccessError"))
if mibBuilder.loadTexts: cvDnisMappingUrlInaccessible.setStatus('current')

# Conformance / compliance groups.
cvDnisMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 3))
cvDnisMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 1))
cvDnisMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 2))
cvDnisMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 1, 1)).setObjects(("CISCO-VOICE-DNIS-MIB", "cvDnisGroup"), ("CISCO-VOICE-DNIS-MIB", "cvDnisNotificationGroup"))
# setStatus() only exists on newer pysmi builders, hence the version gate.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cvDnisMIBCompliance = cvDnisMIBCompliance.setStatus('current')
cvDnisGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 2, 1)).setObjects(("CISCO-VOICE-DNIS-MIB", "cvDnisMappingUrl"), ("CISCO-VOICE-DNIS-MIB", "cvDnisMappingRefresh"), ("CISCO-VOICE-DNIS-MIB", "cvDnisMappingUrlAccessError"), ("CISCO-VOICE-DNIS-MIB", "cvDnisMappingStatus"), ("CISCO-VOICE-DNIS-MIB", "cvDnisNodeUrl"), ("CISCO-VOICE-DNIS-MIB", "cvDnisNodeModifiable"), ("CISCO-VOICE-DNIS-MIB", "cvDnisNodeStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cvDnisGroup = cvDnisGroup.setStatus('current')
cvDnisNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 2, 2)).setObjects(("CISCO-VOICE-DNIS-MIB", "cvDnisMappingUrlInaccessible"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cvDnisNotificationGroup = cvDnisNotificationGroup.setStatus('current')

# Re-export every symbol so other generated MIB modules can import them.
mibBuilder.exportSymbols("CISCO-VOICE-DNIS-MIB", cvDnisNodeUrl=cvDnisNodeUrl, cvDnisMappingStatus=cvDnisMappingStatus, cvDnisMIBNotificationPrefix=cvDnisMIBNotificationPrefix, cvDnisNumber=cvDnisNumber, cvDnisMappingEntry=cvDnisMappingEntry, cvDnisMIBGroups=cvDnisMIBGroups, cvDnisNodeTable=cvDnisNodeTable, cvDnisGroup=cvDnisGroup, cvDnisMappingTable=cvDnisMappingTable, cvDnisMappingUrlInaccessible=cvDnisMappingUrlInaccessible, cvDnisMIBObjects=cvDnisMIBObjects, cvDnisMappingRefresh=cvDnisMappingRefresh, cvDnisMappingUrl=cvDnisMappingUrl, CvE164String=CvE164String, cvDnisMappingUrlAccessError=cvDnisMappingUrlAccessError, DnisMapname=DnisMapname, ciscoVoiceDnisMIB=ciscoVoiceDnisMIB, cvDnisMap=cvDnisMap, cvDnisMIBConformance=cvDnisMIBConformance, cvDnisMIBCompliances=cvDnisMIBCompliances, cvDnisMIBCompliance=cvDnisMIBCompliance, cvDnisNodeModifiable=cvDnisNodeModifiable, cvDnisNodeStatus=cvDnisNodeStatus, cvDnisMappingName=cvDnisMappingName, cvDnisNotificationGroup=cvDnisNotificationGroup, cvDnisMIBNotifications=cvDnisMIBNotifications, cvDnisNodeEntry=cvDnisNodeEntry, PYSNMP_MODULE_ID=ciscoVoiceDnisMIB)
"""
oops
"""
# The middle argument evaluates to True, so this prints "0 True 0".
print(0, True, 0)
# acronymsBuilder.py
# A program to build acronyms from a phrase
# by Tung Nguyen


def main():
    # Explain what the program does before prompting.
    print("This program builds acronyms.")
    print()

    # Read the phrase and break it into individual words.
    phrase = input("Enter a phrase: ")
    words = phrase.split()

    # Take the first letter of every word, uppercased, and glue them together.
    acronym = "".join(word[0].upper() for word in words)

    print("The acronym is " + acronym)


main()
#     x  y    x  y
#r1 = [[0, 0], [5, 5]]
r1 = [[-5, -5], [-2, -2]]
r2 = [[7, 1], [1, 8]]
r3 = [[-1.2, 4], [3.7, 1.1]]


def rect_intersection_area(r1, r2, r3):
    """Return the area common to three axis-aligned rectangles.

    Each rectangle is [[x1, y1], [x2, y2]] with its two opposite corners
    given in any order. Returns 0 when the rectangles share no area.
    """
    def bounds(rect):
        # Normalize to (left, right, top, bottom) regardless of which
        # corner was listed first.
        (x1, y1), (x2, y2) = rect
        return min(x1, x2), max(x1, x2), min(y1, y2), max(y1, y2)

    b1, b2, b3 = bounds(r1), bounds(r2), bounds(r3)

    # The intersection runs from the largest left/top edge to the smallest
    # right/bottom edge. Bug fix: the original took min() of r3's x/y where
    # max() was required, corrupting the right and bottom edges.
    left_x_p = max(b1[0], b2[0], b3[0])
    right_x_p = min(b1[1], b2[1], b3[1])
    top_y_p = max(b1[2], b2[2], b3[2])
    bottom_y_p = min(b1[3], b2[3], b3[3])

    print('left_x_p:', left_x_p)
    print('right_x_p:', right_x_p)
    print('top_y_p:', top_y_p)
    print('bottom_y_p:', bottom_y_p)

    # Empty overlap on either axis means zero area.
    if right_x_p < left_x_p or bottom_y_p < top_y_p:
        return 0

    return (right_x_p - left_x_p) * (bottom_y_p - top_y_p)


#def is_within()


print(rect_intersection_area(r1, r2, r3))
# https://atcoder.jp/contests/abs/tasks/practice_1
def resolve():
    """Read a, then b and c, then a string; print 'a+b+c <string>'."""
    a = int(input())
    b, c = map(int, input().split())
    d = input()
    print("{} {}".format(a + b + c, d))
class IpConfiguration:
    """Thin value holder wrapping an IP-configuration options object."""

    # Options payload handed in by the caller; its shape is defined by the
    # call sites (presumably a dict or namespace -- confirm there).
    options: object

    def __init__(self, options):
        # Stored verbatim; no validation or copying is performed.
        self.options = options
#!/usr/bin/env python3

# Compares two lexicon files providing several stats
# @author Cristian TG
# @since 2021/04/15

# Please change the value of these variables:
LEXICON_1 = 'lexicon1.txt'
LEXICON_2 = 'lexicon2.txt'
SHOW_DETAILS = True
DISAMBIGUATION_SYMBOL = '#'

###############################################################
###############################################################
#import os

def getLexicon(lexicon, path):
    """Fill *lexicon* with word -> list-of-phones read from *path*.

    Each line is expected to look like '<word>\\t<phone phone ...>'.
    NOTE(review): assumes at least one tab per line; a malformed line
    would raise IndexError.
    """
    with open(path) as lexi:
        for line in lexi:
            aux = line.split('\t')
            lexicon[aux[0]] = aux[1].replace("\n","").split(" ")
    return lexicon

def getWords(lexicon):
    """Return the set of words (the keys) of *lexicon*."""
    words = set()
    for word in lexicon:
        words.add(word)
    return words

def getCharacters(lexicon):
    """Return the set of characters used across all words of *lexicon*."""
    characters = set()
    for word in lexicon:
        for c in word:
            characters.add(c)
    return characters

def getPhonemes(lexicon):
    """Return (phonemes, disambiguation) sets from *lexicon*'s values.

    Any symbol containing DISAMBIGUATION_SYMBOL is treated as a
    disambiguation marker instead of a phoneme.
    """
    phonemes = set()
    disambiguation = set()
    for word in lexicon:
        phones = lexicon[word]
        for p in phones:
            if DISAMBIGUATION_SYMBOL not in p:
                phonemes.add(p)
            else:
                disambiguation.add(p)
    return phonemes, disambiguation

#############################################################
# Load both lexicons and derive word/character/phoneme sets for each.
lexicon1 = getLexicon({}, LEXICON_1)
lexicon2 = getLexicon({}, LEXICON_2)
words_l1 = getWords(lexicon1)
words_l2 = getWords(lexicon2)
characters_l1 = getCharacters(lexicon1)
characters_l2 = getCharacters(lexicon2)
phonemes_l1, disambiguation_l1 = getPhonemes(lexicon1)
phonemes_l2, disambiguation_l2 = getPhonemes(lexicon2)

# Word-level comparison.
print("\nLEXICON_1", LEXICON_1, "LEXICON_2", LEXICON_2)
print("- Number of words:", len(words_l1), len(words_l2), len(words_l1)-len(words_l2))
print("\n- Number of common words:", len(words_l1&words_l2))
if SHOW_DETAILS:
    print(words_l1 & words_l2)
print("- Number of words included in 1 (not in 2):", len(words_l1 - words_l2))
if SHOW_DETAILS:
    print(words_l1 - words_l2)
print("- Number of words included in 2 (not in 1):", len(words_l2 - words_l1))
if SHOW_DETAILS:
    print(words_l2 - words_l1)

# Character-level comparison.
print("\n- Number of characters:", len(characters_l1), len(characters_l2), len(characters_l1)-len(characters_l2))
print("- Number of common characters:", len(characters_l1 & characters_l2))
if SHOW_DETAILS:
    print(characters_l1 & characters_l2)
print("- Number of characters included in 1 (not in 2):", len(characters_l1 - characters_l2))
if SHOW_DETAILS:
    print(characters_l1 - characters_l2)
print("- Number of characters included in 2 (not in 1):", len(characters_l2 - characters_l1))
if SHOW_DETAILS:
    print(characters_l2 - characters_l1)

# Phoneme-level comparison.
print("\n- Number of phonemes:", len(phonemes_l1), len(phonemes_l2), len(phonemes_l1)-len(phonemes_l2))
print("- Number of common phonemes:", len(phonemes_l1 & phonemes_l2))
if SHOW_DETAILS:
    print(phonemes_l1 & phonemes_l2)
print("- Number of phonemes included in 1 (not in 2):", len(phonemes_l1 - phonemes_l2))
if SHOW_DETAILS:
    print(phonemes_l1 - phonemes_l2)
print("- Number of phonemes included in 2 (not in 1):", len(phonemes_l2 - phonemes_l1))
if SHOW_DETAILS:
    print(phonemes_l2 - phonemes_l1)

# Disambiguation-symbol comparison.
print("\n- Number of disambiguation symbols:", len(disambiguation_l1), len(disambiguation_l2), len(disambiguation_l1)-len(disambiguation_l2))
print("- Number of common disambiguation symbols:", len(disambiguation_l1 & disambiguation_l2))
if SHOW_DETAILS:
    print(disambiguation_l1 & disambiguation_l2)
print("- Number of disambiguation symbols included in 1 (not in 2):", len(disambiguation_l1 - disambiguation_l2))
if SHOW_DETAILS:
    print(disambiguation_l1 - disambiguation_l2)
print("- Number of disambiguation symbols included in 2 (not in 1):", len(disambiguation_l2 - disambiguation_l1))
if SHOW_DETAILS:
    print(disambiguation_l2 - disambiguation_l1)
def three_word(a, b, c):
    """Quote the title/tag/description fields for the output format.

    An argument equal to the literal two-character string '""' marks a
    missing field and becomes the empty quoted string "''"; anything else
    is wrapped in single quotes. Returns the quoted (title, tag,
    description) triple.
    """
    def quote(value):
        return "''" if value == '""' else "'{0}'".format(value)

    return quote(a), quote(b), quote(c)


def format_file(old_file, new_file):
    """Re-emit a 26-column tab-separated face file with quoting applied.

    Columns 1, 2 and 10 are single-quoted in place; columns 4-6 (title,
    tag, description) are quoted through three_word(). Any failure is
    appended to log.txt instead of aborting the batch.

    Fixes over the original: files are closed via `with` even on error
    (the output handle previously leaked, and shadowed this function's
    name), each line is split once instead of 26 times, and log entries
    are newline-terminated instead of running together.
    """
    try:
        with open(new_file, 'a') as out_file, open(old_file, 'r') as face_file:
            for line in face_file:
                fields = line.split('\n')[0].split('\t')
                title, tag, description = three_word(fields[4], fields[5], fields[6])
                quoted = list(fields)
                quoted[1] = "'{0}'".format(fields[1])
                quoted[2] = "'{0}'".format(fields[2])
                quoted[4] = title
                quoted[5] = tag
                quoted[6] = description
                quoted[10] = "'{0}'".format(fields[10])
                # Index explicitly so a short row raises (and is logged),
                # matching the original's IndexError behavior.
                row = [quoted[j] for j in range(26)]
                out_file.write('\t'.join(row) + '\n')
    except Exception as e:
        with open('log.txt', 'a') as log:
            log.write("{0},{1}\n".format(new_file, e))


if __name__ == '__main__':
    start = 1
    end = 135
    for i in range(start, end):
        print(i)
        format_file(r'D:\Users\KYH\Desktop\EmotionMap\FlickrEmotionData\4face_all\face{0}.txt'.format(i),
                    r'D:\Users\KYH\Desktop\EmotionMap\FlickrEmotionData\5face_format\face{0}.txt'.format(i))
# [Skill] Cygnus Constellation (20899) echo = 10001005 cygnusConstellation = 1142597 cygnus = 1101000 if sm.canHold(cygnusConstellation): sm.setSpeakerID(cygnus) sm.sendNext("You have exceeded all our expectations. Please take this as a symbol of your heroism.\r\n" "#s" + str(echo) + "# #q" + str(echo) + "#\r\n" "#i" + str(cygnusConstellation) + "# #z" + str(cygnusConstellation) + "#") sm.completeQuest(parentID) sm.giveSkill(echo) sm.giveItem(cygnusConstellation) else: sm.setSpeakerID(cygnus) sm.sendSayOkay("Please make room in your Equip inventory.")
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two numbers stored as reversed-digit linked lists.

        Each list holds one digit per node, least significant first; the
        sum is returned in the same reversed-digit representation.
        """
        total = str(int(self.combine(l1)) + int(self.combine(l2)))
        return self.separate(total, len(total) - 1)

    def combine(self, lst):
        """Return the list's digits as a most-significant-first string."""
        digits = []
        node = lst
        while node is not None:
            digits.append(str(node.val))
            node = node.next
        # Nodes are least-significant-first, so reverse for normal order.
        digits.reverse()
        return "".join(digits)

    def separate(self, string, i):
        """Build a reversed-digit list from string[i] down to string[0]."""
        node = ListNode(int(string[0]), None)
        for j in range(1, i + 1):
            # Prepend so the final head carries string[i].
            node = ListNode(int(string[j]), node)
        return node
def http_exception(response):
    """Wrap *response* in an HTTPException and raise it immediately."""
    raise HTTPException(response)


class HTTPException(Exception):
    """Exception carrying the HTTP response object that triggered it.

    The wrapped response is expected to expose `message`, `is_redirect`,
    `is_client_error` and `is_server_error` attributes.
    """

    def __init__(self, response):
        self._response = response

    def __str__(self):
        # The response's message doubles as the exception text.
        return self._response.message

    @property
    def is_redirect(self):
        """Delegates to the wrapped response's `is_redirect` flag."""
        return self._response.is_redirect

    @property
    def is_client_error(self):
        """Delegates to the wrapped response's `is_client_error` flag."""
        return self._response.is_client_error

    @property
    def is_server_error(self):
        """Delegates to the wrapped response's `is_server_error` flag."""
        return self._response.is_server_error
# Greedy coin change: count the coins needed to make `change`, always
# taking as many of the largest remaining denomination as fit.
coins = [100, 50, 25, 5, 1]
total = 0
change = 130

for coin in coins:
    numCoins, change = divmod(change, coin)
    total += numCoins

print(total)

'''
numCoins = 75 // 100 = 0
change = change - 0 * 100
change = change
'''
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Minimal encoder/decoder for the Redis wire protocol (RESP).

DELIMITER = "\r\n"


def encode(*args):
    "Pack a series of arguments into a value Redis command"
    # Multi-bulk layout: *<argc>\r\n followed by $<len>\r\n<arg>\r\n per arg.
    result = []
    result.append("*")
    result.append(str(len(args)))
    result.append(DELIMITER)
    for arg in args:
        result.append("$")
        result.append(str(len(arg)))
        result.append(DELIMITER)
        result.append(arg)
        result.append(DELIMITER)
    return "".join(result)


def decode(data):
    """Decode one RESP value, dispatching on the type marker byte."""
    processed, index = 0, data.find(DELIMITER)
    if index == -1:
        index = len(data)
    # First byte identifies the RESP type: * array, $ bulk, + status,
    # - error, : integer.
    term = data[processed]
    if term == "*":
        return parse_multi_chunked(data)
    elif term == "$":
        return parse_chunked(data)
    elif term == "+":
        return parse_status(data)
    elif term == "-":
        return parse_error(data)
    elif term == ":":
        return parse_integer(data)
    # NOTE(review): unknown markers fall through to an implicit None.


def parse_stream(data):
    """Split a stream of concatenated multi-bulk commands into a list of
    space-joined command strings (e.g. 'SET key value')."""
    cursor = 0
    data_len = len(data)
    result = []
    while cursor < data_len:
        pdata = data[cursor:]
        index = pdata.find(DELIMITER)
        count = int(pdata[1:index])
        cmd = ''
        start = index + len(DELIMITER)
        for i in range(count):
            chunk, length = parse_chunked(pdata, start)
            start = length + len(DELIMITER)
            cmd += " " + chunk
        cursor += start
        result.append(cmd.strip())
    return result


def parse_multi_chunked(data):
    """Parse a '*' array of bulk strings into a Python list."""
    index = data.find(DELIMITER)
    count = int(data[1:index])
    result = []
    start = index + len(DELIMITER)
    for i in range(count):
        chunk, length = parse_chunked(data, start)
        start = length + len(DELIMITER)
        result.append(chunk)
    return result


def parse_chunked(data, start=0):
    """Parse one '$' bulk string beginning at *start*.

    NOTE(review): dual return convention -- when start == 0 the bare value
    is returned; otherwise a [value, next_offset] pair is returned for the
    benefit of parse_stream/parse_multi_chunked. A $-1 (null bulk) yields
    None, or (None, index) when mid-stream.
    """
    index = data.find(DELIMITER, start)
    if index == -1:
        index = start
    length = int(data[start + 1:index])
    if length == -1:
        if index + len(DELIMITER) == len(data):
            return None
        else:
            return None, index
    else:
        result = data[index + len(DELIMITER):index + len(DELIMITER) + length]
        return result if start == 0 else [result, index + len(DELIMITER) + length]


def parse_status(data):
    """'+' simple string -> [True, message]."""
    return [True, data[1:]]


def parse_error(data):
    """'-' error string -> [False, message]."""
    return [False, data[1:]]


def parse_integer(data):
    """':' integer -> [value]."""
    return [int(data[1:])]


if __name__ == '__main__':
    print(decode(encode("ping")))
    # Note: a single argument, so this encodes one bulk string, not three.
    print((encode("set some value")))
    print(encode("foobar"))
    data = '*3\r\n$3\r\nSET\r\n$15\r\nmemtier-8232902\r\n$2\r\nxx\r\n*3\r\n$3\r\nSET\r\n$15\r\nmemtier-8232902\r\n$2\r\nxx\r\n*3\r\n$3\r\nSET\r\n$15\r\nmemtier-7630684\r\n$3\r\nAAA\r\n'
    print(parse_stream(data))
# Greet each caller by name on stdout.
def hello(name):
    """Print a greeting for *name*."""
    print('hello', name)


hello('bob')
hello('alice')
# Reads a general-guest list, a sorted banned list, and a batch of
# queries; prints 'Proibido' for banned names and 'Geral' otherwise,
# resolving membership with a hand-rolled binary search.
n = int(input())
gerais = input().split()
m = int(input())
proib = input().split()
q = int(input())
arr = input().split()

for key in arr[:q]:
    lo, hi = 0, m - 1
    banido = False
    while lo <= hi:
        mid = (lo + hi) // 2
        if proib[mid] == key:
            banido = True
            break
        if key < proib[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    print('Proibido' if banido else 'Geral')
# Starts from 1 and applies n operations read from stdin, one
# "<value> <op>" pair per line ('/' divides, anything else multiplies),
# then prints the result rounded to an integer.
n = int(input())
calculadora = 1
for _ in range(n):
    valor, operacao = input().split()
    valor = int(valor)
    calculadora = calculadora / valor if operacao == '/' else calculadora * valor
print("{0:.0f}".format(calculadora))
# Application configuration constants (development profile).

BEHIND_PROXY = True  # presumably enables reverse-proxy header handling -- confirm in app setup
SWAGGER_BASEPATH = ""  # base path under which the API spec is served ("" = site root)
DEFAULT_DATABASE = "dev"  # alias used when no database is specified
# NOTE(review): DEFAULT_DATABASE ("dev") is absent from DATABASES -- verify intent.
DATABASES = ["test"]  # known database aliases
ENV = "development"
DEBUG = True
class Solution:
  def findMaxValueOfEquation(self, points: List[List[int]], k: int) -> int:
    """Maximize yi + yj + |xi - xj| over pairs with |xi - xj| <= k.

    `points` is sorted by x, so for an earlier point i and current point j
    the target is (yj + xj) + (yi - xi). Keep candidate (yi - xi) values in
    a min-heap keyed on (x - y) -- the smallest x - y is the largest y - x --
    and lazily evict candidates whose x lies more than k behind.
    """
    best = -math.inf
    window = []  # heap entries: (x - y, x)
    for x, y in points:
      # Drop candidates out of range for the current x.
      while window and x - window[0][1] > k:
        heapq.heappop(window)
      if window:
        best = max(best, x + y - window[0][0])
      heapq.heappush(window, (x - y, x))
    return best
class Feature:
    """Container for a feature's value, display strings and search metadata."""

    def __init__(self, value, string, infix_string, size=0, fitness=1, original_variable=False):
        # Evaluated value of the feature.
        self.value = value
        # Fitness score; new features default to 1.
        self.fitness = fitness
        # Display string and its infix-notation counterpart.
        self.string = string
        self.infix_string = infix_string
        # Presumably the expression's node count / complexity -- confirm at call sites.
        self.size = size
        # Whether this feature is one of the raw input variables.
        self.original_variable = original_variable

    def __str__(self):
        # The plain display string stands in for the whole feature.
        return self.string
def my_range(n):
    """Yield 0..n inclusive, then a final sentinel string.

    Mirrors range() except the upper bound is included and a trailing
    'there are no values left' marker is produced before exhaustion.
    """
    i = 0
    while i <= n:
        yield i
        i += 1
    yield 'there are no values left'


gen = my_range(4)
# Bug fix: the original called next() a fixed 7 times, but my_range(4)
# yields only 6 items, so the 7th call escaped as an uncaught
# StopIteration and crashed the script. Iterating the generator prints
# every value and stops cleanly.
for value in gen:
    print(value)
"""
pattern_stringHEAD_stringTAIL_list(
"brianxxxcvbpythonvvvvvvvvvgghhbrianpppfgpython","brian","python")
returns ["xxxcvb","pppfg"]

pattern_stringHEAD_stringTAIL_list(
"susanvenezuelastronggghhsusancanadastrong","susan","strong")
returns ["venezuela","canada"]

pattern_stringHEAD_stringTAIL_list(
"boatxvmotorvvvmotorvgghmotor","boat","motor")
returns ["xv"]
"""


def pattern_stringHEAD_stringTAIL_list(test_string, head_string, tail_string):
    """Return the substrings framed by the i-th head and i-th tail occurrence.

    Scans test_string for every occurrence of head_string and tail_string,
    pairs them up positionally, and returns the text strictly between each
    pair. The number of frames is the smaller of the two occurrence counts.

    Fixes over the original draft:
    - `number_of_frames_string` was an undefined name (NameError);
    - list + string concatenation appended individual characters instead
      of one substring per frame;
    - the slice start misused `ptr_start_list[x + len(head_string)]`
      instead of `ptr_start_list[x] + len(head_string)`.
    """
    ptr_start_list = []  # indexes where head_string occurs
    ptr_end_list = []    # indexes where tail_string occurs
    frame_string_list = []  # substrings found between header and tail

    # Record every occurrence of the head and tail markers.
    for j in range(len(test_string)):
        if test_string[j:j + len(head_string)] == head_string:
            ptr_start_list.append(j)
        if test_string[j:j + len(tail_string)] == tail_string:
            ptr_end_list.append(j)

    # A valid frame needs both a head and a tail, so pair them in order.
    number_of_frames = min(len(ptr_start_list), len(ptr_end_list))

    for x in range(number_of_frames):
        start = ptr_start_list[x] + len(head_string)
        end = ptr_end_list[x]
        frame_string_list.append(test_string[start:end])

    return frame_string_list
def merge_values(original: dict, new_values: dict):
    """
    if a value in a dictionary is also a dictionary we want to keep
    the old information inside it
    """
    for key in new_values:
        incoming = new_values[key]
        if not isinstance(incoming, dict):
            original[key] = incoming
        else:
            # Recurse so nested keys already present in `original` survive.
            existing = original.get(key, dict())
            original[key] = merge_values(existing, incoming)
    return original
# Advent of code Year 2021 Day 02 solution
# Author = Anmol Gupta
# Date = December 2021

# NOTE: `input` here shadows the builtin, but input() is never used below.
input = list()
with open("input.txt", "r") as input_file:
    input = input_file.readlines()


def get_command(line):
    """Split an 'action magnitude' line into an (action, int) tuple."""
    parts = line.strip().split()
    return (parts[0], int(parts[1]))


input_commands = [get_command(line) for line in input]

# Part 1: 'forward' moves horizontally; 'up'/'down' change depth directly.
horizontal_position = 0
depth = 0
for action, magnitude in input_commands:
    if action == "forward":
        horizontal_position += magnitude
    elif action == "up":
        depth -= magnitude
    elif action == "down":
        depth += magnitude
print("Part One : " + str(horizontal_position * depth))

# Part 2: 'up'/'down' adjust aim; 'forward' moves and dives by aim * magnitude.
horizontal_position = 0
depth = 0
aim = 0
for action, magnitude in input_commands:
    if action == "forward":
        horizontal_position += magnitude
        depth += aim * magnitude
    elif action == "up":
        aim -= magnitude
    elif action == "down":
        aim += magnitude
print("Part Two : " + str(horizontal_position * depth))
class nullcontext:
    """
    A replacement for `contextlib.nullcontext` for python versions before 3.7
    """

    def __enter__(self):
        # No resource to acquire; the `as` target of the with-statement is None.
        return None

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Returning None (falsy) lets any exception propagate unchanged.
        return None
""" * Assignment: Sequence List Many * Required: yes * Complexity: easy * Lines of code: 3 lines * Time: 5 min English: 1. Create list `a` with data from row 1 2. Create list `b` with data from row 2 3. Create list `c` with data from row 3 4. Rewrite data manually: a. Do not automate by writing code b. Do not use `str.split()`, `slice`, `getitem`, `for`, `while` or any other control-flow statement c. Objective is to learn the syntax, not automation d. Convert numerical values to float (manually) 5. Run doctests - all must succeed Polish: 1. Stwórz listę `a` z danymi z wiersza 1 2. Stwórz listę `b` z danymi z wiersza 2 3. Stwórz listę `c` z danymi z wiersza 3 4. Przepisz dane ręcznie: a. Nie automatyzuj pisząc kod b. Nie używaj `str.split()`, `slice`, `getitem`, `for`, `while` lub jakiejkolwiek innej instrukcji sterującej c. Celem jest nauka składni, a nie automatyzacja d. Przekonwertuj wartości numeryczne do float (ręcznie) 5. Uruchom doctesty - wszystkie muszą się powieść Tests: >>> import sys; sys.tracebacklimit = 0 >>> assert a is not Ellipsis, \ 'Assign result to variable: `a`' >>> assert b is not Ellipsis, \ 'Assign result to variable: `b`' >>> assert c is not Ellipsis, \ 'Assign result to variable: `c`' >>> assert type(a) is list, \ 'Variable `a` has invalid type, should be list' >>> assert type(b) is list, \ 'Variable `b` has invalid type, should be list' >>> assert type(c) is list, \ 'Variable `c` has invalid type, should be list' >>> assert len(a) == 5, \ 'Variable `a` length should be 5' >>> assert len(b) == 5, \ 'Variable `b` length should be 5' >>> assert len(c) == 5, \ 'Variable `c` length should be 5' >>> assert (5.8 in a ... and 2.7 in a ... and 5.1 in a ... and 1.9 in a ... and 'virginica' in a) >>> assert (5.1 in b ... and 3.5 in b ... and 1.4 in b ... and 0.2 in b ... and 'setosa' in b) >>> assert (5.7 in c ... and 2.8 in c ... and 4.1 in c ... and 1.3 in c ... 
and 'versicolor' in c) """ DATA = ['sepal_length,sepal_width,petal_length,petal_width,species', '5.8,2.7,5.1,1.9,virginica', '5.1,3.5,1.4,0.2,setosa', '5.7,2.8,4.1,1.3,versicolor', '6.3,2.9,5.6,1.8,virginica', '6.4,3.2,4.5,1.5,versicolor'] # list[float|str]: with data from row[1]: 5.8, 2.7, 5.1, 1.9 and virginica a = [5.8, 2.7, 5.1, 1.9, "virginica"] # list[float|str]: with data from row[2]: 5.1, 3.5, 1.4, 0.2 and setosa b = [5.1, 3.5, 1.4, 0.2, "setosa"] # list[float|str]: with data from row[3]: 5.7, 2.8, 4.1, 1.3 and versicolor c = [5.7, 2.8, 4.1, 1.3, "versicolor"]
{ 'targets': [ { 'target_name': 'node_libtiepie', 'sources': [ 'src/libtiepie.cc' ], 'include_dirs': [ '<!(node -e "require(\'nan\')")' ], 'conditions': [ [ 'OS=="linux"', { 'libraries': ['-ltiepie'] } ], [ 'OS=="win"', { 'sources': [ 'src/libtiepieloader.cc' ], 'defines': [ 'LIBTIEPIE_DYNAMIC' ], 'include_dirs': [ '<(module_root_dir)/deps/libtiepie/include' ] } ] ] } ] }
# Write a program that prompts the user for a measurement in meters and then
# converts it to miles, feet, and inches.

meters = float(input("Enter the measurement in meters: "))

# Standard conversion factors from meters.
miles = meters * 0.000621371192
feet = meters * 3.2808399
inches = meters * 39.3700787

print(f"Measurement in miles: {miles:f}")
print(f"Measurement in feet: {feet:.2f}")
print(f"Measurement in inches: {inches:.2f}")
def typist(s):
    """Count keystrokes to type s: one per character, plus one extra each
    time the shift state (uppercase vs lowercase) has to change.

    The keyboard starts in lowercase state (shift off).
    """
    keystrokes = 0
    shift_state = 0  # 0 = lowercase mode, 1 = uppercase mode
    for ch in s:
        needed = 1 if ch.isupper() else 0
        # One press for the character itself, one more if shift must toggle.
        keystrokes += 1
        if needed != shift_state:
            keystrokes += 1
        shift_state = needed
    return keystrokes
class ScopeCache(object):
    """Abstract cache of component instances owned by a dependency scope.

    Concrete subclasses decide which component types the scope handles and
    how their instances are stored and retrieved.
    """

    def __init__(self, name: str):
        """
        :param name: human-readable name of the scope
        """
        self.name = name

    def handles_component(self, component: type) -> bool:
        """Return True if this scope is responsible for `component`."""
        raise NotImplementedError

    def is_stored(self, component: type) -> bool:
        """Return True if an instance of `component` is already cached."""
        raise NotImplementedError

    def get(self, component: type) -> object:
        """Return the cached instance for `component`."""
        raise NotImplementedError

    def store(self, component: type, instance: object):
        """Cache `instance` as the scope's instance of `component`."""
        raise NotImplementedError


class ComponentNotHandledByScopeException(Exception):
    # Raised when a scope is asked about a component type it does not manage.
    pass


class UninstantiatedObjectException(Exception):
    # Raised when a component is requested before an instance was stored.
    pass
# For each of n test cases, three angles form a valid triangle
# exactly when they sum to 180 degrees.
n = int(input())
for i in range(n):
    a, b, c = [int(x) for x in input().split()]
    # Named `total` so the builtin `sum` is not shadowed.
    total = a + b + c
    if total == 180:
        print("YES")
    else:
        print("NO")
# Slope/intercept of a log-linear relation: log(L_X) = beta + gamma * log(L_UV).
beta = 9.
gamma = 0.6
# NOTE(review): `logLUV` is not defined in this fragment — presumably assigned
# earlier in the file or by the calling context; confirm before reusing.
logLX = beta + gamma * logLUV
# Intrinsic scatter of the relation (a previous value of 0.35 is kept for reference).
scatter = 0.4 # 0.35

# LX: monochromatic at 2 keV
# LUV: monochromatic at 2500 AA
class Solution:
    def isValid(self, s):
        """Return True when every bracket in s is properly matched and nested."""
        if s == '':
            return True
        # Map each closing bracket to the opener it must match.
        pairs = {')': '(', ']': '[', '}': '{'}
        pending = []
        for ch in s:
            if pending and pairs.get(ch) == pending[-1]:
                # ch closes the most recent unmatched opener.
                pending.pop()
            else:
                # Openers — and any mismatched closer — are pushed; a leftover
                # closer can never be matched later, so the final check fails.
                pending.append(ch)
        # Valid exactly when every bracket found its partner.
        return not pending


if __name__ == '__main__':
    print(Solution().isValid("{{)}"))
#! /usr/bin/env python """ A singleton pattern implemented in python. Adapted from ActiveState Code Recipe 52558: The Singleton Pattern implemented with Python http://code.activestate.com/recipes/52558/ """ class Singleton(object): """ A python singleton """ class SingletonImplementation: """ Implementation of the singleton interface """ def singletonId(self): """ Test method, return singleton id """ return id(self) # Storage for the instance reference __instance = None def __init__(self): """ Create singleton instance """ # Check whether we already have an instance if Singleton.__instance is None: # Create and remember instance Singleton.__instance = Singleton.SingletonImplementation() # Store instance reference as the only member in the handle self.__dict__['_Singleton__instance'] = Singleton.__instance def __getattr__(self, attr): """ Delegate access to implementation """ return getattr(self.__instance, attr) def __setattr__(self, attr, value): """ Delegate access to implementation """ return setattr(self.__instance, attr, value)
class SecretKey:
    """A wrapper class for representing secret key.

    Typical format of secret key data would be [p1, p2, p3, ...] where pi
    represents the polynomial for each coefficient modulus. Elements of each
    polynomial are taken from {-1, 0, 1}, represented in their respective
    modulus.

    Attributes:
        data: A 2-dim list representing secret key values.
    """

    def __init__(self, data):
        # Stored as-is; callers are expected to pass the 2-dim list described above.
        self.data = data
# For loops are used ot iterate over all elements of an iterable # They use use the 'for variable in iterable' syntax for i in range(0, 3): # x is defined in the for loop and usable in this body of the for loop print(i) # prints 0, 1, 2 each on a new line for i in [10, "Hello", "World"]: # We call this iterating through an iterable # Here it would be called iterating through a list print(i) # prints 10 then Hello and then World each on a new line
""" Lista em Python fatiamento append, insert, pop, del, clear, extend, min, max range """ secreto = 'Perfume' digitadas = [] chances = 3 while True: if chances <= 0: print('Você perdeu!!!') break letra = input('Digite uma letra: ') if len(letra) > 1: print('Ahhh isso não vale, digite apenas uma letra.') continue digitadas.append(letra) if letra in secreto: print(f'UHUUULLL, a letra "{letra}" existe na palavra secreta.') else: print(f'AFFzzzz: a letra "{letra}" NÃO EXISTE na palavra secreta.') digitadas.pop() secreto_temporario = '' for letra_secreta in secreto: if letra_secreta in digitadas: secreto_temporario += letra_secreta else: secreto_temporario += '*' if secreto_temporario == secreto: print(f'Que legal, voçê ganhou!!! A palavra era {secreto_temporario}') break else: print(f'A palavra secreta esta assim: {secreto_temporario}') if letra not in secreto: chances -= 1 print(f'Voçê ainda tem {chances} chances.')
#!/usr/bin/env python
#-*- coding: utf-8 -*-


class Fitness:
    def __init__(self, criterion, *args, **kwargs):
        """
        Simplest single criterion fitness object.

        :param criterion: Generic container to store the fitness criterion;.
        :type criterion: object
        """
        self.criterion = criterion

    def __gt__(self, other):
        return self.criterion > other.criterion

    def __le__(self, other):
        return not self.__gt__(other)

    def __lt__(self, other):
        # BUG FIX: a < b means "not greater AND not equal", i.e.
        # not (a > b  OR  a == b).  The original used `and` inside the
        # negation, which made __lt__ return True almost always.
        # NOTE(review): `==` falls back to identity here (no __eq__ defined);
        # confirm whether criterion-based equality is intended.
        return not (self.__gt__(other) or self == other)

    def __ge__(self, other):
        return self.__gt__(other) or self == other

    def __str__(self):
        return "{}".format(self.criterion)


class MinimizeFitness(Fitness):
    def __init__(self, criterion):
        """
        Single criterion fitness object. Instead of maximizing the criterion,
        we minimize it.

        :param criterion: Generic container to store the fitness criterion;.
        :type criterion: object
        """
        super(MinimizeFitness, self).__init__(criterion)

    def __gt__(self, other):
        # Inverted comparison: a smaller criterion is "fitter".
        return self.criterion < other.criterion


class GapsFitness(Fitness):
    def __init__(self, criterion, gaps):
        """
        Dual criterion fitness object. The first maximization *criterion* will
        be compared first and if the criteron is the same, the 2nd *gap* is an
        inverse criterion that requires minimization.

        :param criterion: Generic container to store the fitness criterion.
        :type criterion: object
        """
        super(GapsFitness, self).__init__(criterion)
        self.gaps = gaps

    def __gt__(self, other):
        if self.criterion != other.criterion:
            return self.criterion > other.criterion
        # Tie on the main criterion: fewer gaps wins.
        return self.gaps < other.gaps

    def __str__(self):
        return "{}\t{}".format(self.criterion, self.gaps)
# Prompt for weight and height, then report the body-mass index.
kilograms = float(input("Please enter weight in kilograms: "))
meters = float(input("Please enter height in meters: "))

# BMI = mass / height^2
body_mass_index = kilograms / (meters * meters)
print("BMI is: ", body_mass_index)
# sets demo (集合.py)
def n_gram(txt, n):
    """Return every contiguous length-n slice of txt, in order of appearance."""
    return [txt[i:i + n] for i in range(len(txt) - n + 1)]


# Build the character-bigram sets of the two words.
set_x = set(n_gram('paraparaparadise', 2))
print(f'X:{set_x}')

set_y = set(n_gram('paragraph', 2))
print(f'Y:{set_y}')

# Union
set_wa = set_x | set_y
print(f'和集合:{set_wa}')

# Intersection
set_sk = set_x & set_y
print(f'積集合:{set_sk}')

# Difference
set_sa = set_x - set_y
print(f'差集合: {set_sa}')

# Membership of the bigram 'se'
print(f"seがXに含まれる:{'se' in set_x}")
print(f"seがYに含まれる:{'se' in set_y}")
# -*-coding:Utf-8 -* # Copyright (c) 2010 LE GOFF Vincent # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT # OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Ce fichier est à la racine du projet. Il permet surtout d'établir une hiérarchie des packages présents. 
Packages : - abstraits : des classes abstraites, héritées dans le corps et les modules - bases : des classes de base, utiles au corps et prêtes à l'emplois - corps : contient des informations sur le corps, comme la configuration par défaut - primaires : package des modules primaires eux-mêmes étant des sous-paquets - reseau : package consacré au réseau, architecture de haut niveau pour interagir avec les sockets et récupérer les messages en attente - secondaires : les modules secondaires, sous la forme de sous-paquets Consultez le fichier 'kassie.py' si vous découvrez le projet. Ce fichier est "le maître" du projet, c'est lui qu'il faut lancer si vous voulez lancer le serveur et c'est lui qui peut donner une vision globale de l'ensemble du projet. Référez-vous au site officiel www.kassie.fr pour plus d'informations. """
""" A package in which functionality specific to MAGIC H2020 project can be found The rest of the source code should not depend on MAGIC, being base on "plain" MuSIASEM and of course its evolution inside MAGIC project. """
_base_ = ['./pipelines/rand_aug.py'] # dataset settings dataset_type = 'ImageNet' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='RandomResizedCrop', size=224, backend='pillow', interpolation='bicubic'), dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), dict( type='RandAugment', policies={{_base_.rand_increasing_policies}}, num_policies=2, total_level=10, magnitude_level=9, magnitude_std=0.5, hparams=dict( pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], interpolation='bicubic')), dict( type='RandomErasing', erase_prob=0.25, mode='rand', min_area_ratio=0.02, max_area_ratio=1 / 3, fill_color=img_norm_cfg['mean'][::-1], fill_std=img_norm_cfg['std'][::-1]), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='ToTensor', keys=['gt_label']), dict(type='Collect', keys=['img', 'gt_label']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='Resize', size=(248, -1), backend='pillow', interpolation='bicubic'), dict(type='CenterCrop', crop_size=224), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ] data = dict( samples_per_gpu=64, workers_per_gpu=4, train=dict( type=dataset_type, data_prefix='data/imagenet/train', pipeline=train_pipeline), val=dict( type=dataset_type, data_prefix='data/imagenet/val', # ann_file='data/imagenet/meta/val.txt', pipeline=test_pipeline), test=dict( # replace `data/val` with `data/test` for standard test type=dataset_type, data_prefix='data/imagenet/val', # ann_file='data/imagenet/meta/val.txt', pipeline=test_pipeline))
# Ler os valores de quatro notas escolares bimestrais de um aluno representadas pelas variáveis N1, N2, N3 e N4.Calcular a média aritmética (variável MD1) desse aluno e apresentar a mensagem "Aprovado" se a média obtida for maior ou igual a 7; caso contrário, o programa deve solicitar a quinta nota (nota de exame, representada pela variável NE) do aluno e calcular uma nova média aritmética (variável MD2) entre a nota de exame e a primeira média aritmética. Se o valor da nova média for maior ou igual a cinco, apresentar a mensagem "Aprovado em exame"; caso contrário, apresentar a mensagem "Reprovado". Informar também, após a apresentação das mensagens, o valor da média obtida pelo aluno
N1 = float(input('Infomorme a 1° Nota: '))
N2 = float(input('Infomorme a 2° Nota: '))
N3 = float(input('Infomorme a 3° Nota: '))
N4 = float(input('Infomorme a 4° Nota: '))

# First average of the four term grades (MD1 in the spec above).
MD = (N1 + N2 + N3 + N4)/4

# Per the spec, a direct pass requires an average of at least 7
# (the original code wrongly tested against 5).
if MD >= 7:
    print('Sua nota é {:.1f} | APROVADO'.format(MD))
else:
    NE = float(input('informe a Nota do Exame: '))
    # Per the spec, MD2 is the mean of the exam grade and the FIRST average
    # (the original code averaged all five grades instead).
    MD2 = (MD + NE)/2
    if MD2 >= 5:
        print('Sua nota é {:.1f} | APROVADO EM EXAME'.format(MD2))
    else:
        print('Sua nota é {:.1f} | REPROVADO'.format(MD2))
# File extensions treated as relevant source/script artifacts.
# NOTE(review): presumably used to filter files when scanning commits or
# advisories — confirm against the callers.
RELEVANT_EXTENSIONS = [
    "java",
    "c",
    "cpp",
    "h",
    "py",
    "js",
    "xml",
    "go",
    "rb",
    "php",
    "sh",
    "scale",
    "lua",
    "m",
    "pl",
    "ts",
    "swift",
    "sql",
    "groovy",
    "erl",
    "swf",
    "vue",
    "bat",
    "s",
    "ejs",
    "yaml",
    "yml",
    "jar",
]

# Hosts that references are allowed to point to; several entries exist
# purely for testing purposes (see the per-entry comments below).
ALLOWED_SITES = [
    "for.testing.purposes",
    "lists.apache.org",
    "just.an.example.site",
    "one.more.example.site",
    "non-existing-url.com",  # for testing.
    "jvndb.jvn.jp",  # for trying out: usually does not aviable, but not always, anyway it is a good example
]
# Reformat the 2-gram corrector output: for each line pair up the misspelling
# with its known-correct word and print the candidate list.
with open('2gram_output_birkbeck.txt') as f:
    with open('../data/birkbeck_correct.txt') as g:
        corrects = []
        i = 0
        for line in g:
            corrects.append(line.strip())
        for line in f:
            try:
                # NOTE: ' potential diatance_word:(' (typo included) is the
                # literal delimiter used in the data file — do not "fix" it.
                A = line.split(' potential diatance_word:(')
                misspell = A[0].split(':')[1]
                B = A[1].split(') ')[0]
                C = B.split(')(')
                # Each candidate tuple looks like "(distance, 'word')".
                D = [item.split(', ') for item in C]
                E = [item[1].strip("'") for item in D]
                # Named `joined` so the builtin `str` is not shadowed
                # (the original rebound `str` here).
                joined = "".join([item + ',' for item in E])
                out = ""
                out += misspell + ", " + corrects[i] + ": " + joined
                print(out)
                # Only advance the answer index for successfully parsed lines.
                i += 1
            except IndexError:
                continue
def set_session_user_profile(request, profile=None):
    """Store a JSON-serializable snapshot of the user's profile in the session.

    Without a profile, defaults are used; with one, its preferences win.
    """
    snapshot = {
        'use_pop_article': profile.use_pop_article if profile else True,
        'theme': profile.theme if profile else '',
        'is_authenticated': request.user.is_authenticated,
    }
    request.session['user_profile'] = snapshot
class Solution(object):
    def readBinaryWatch(self, num):
        """
        :type num: int
        :rtype: List[str]

        Return every valid watch time whose hour LEDs (4 bits) and minute
        LEDs (6 bits) light exactly `num` LEDs in total, in ascending
        (hour, minute) order.
        """
        times = []
        for hour in range(12):
            for minute in range(60):
                # Total lit LEDs = set bits of hour plus set bits of minute.
                if bin(hour).count('1') + bin(minute).count('1') == num:
                    times.append("%d:%02d" % (hour, minute))
        return times
# -*- coding: utf-8 -*-

# Area of a circle, demonstrated with lambdas
pi = 3.14159

area_circulo = lambda entRadio: pi * (entRadio ** 2)

radio = 8

# 1st form: call the named lambda
print("El área del círculo es", area_circulo(radio))

# 2nd form: call an anonymous lambda immediately
print("El área del círculo es", (lambda entRadio: pi * (entRadio ** 2))(radio))
def squared(n):
    """Return n to the second power."""
    return n * n


def cubed(n):
    """Return n to the third power."""
    return n * n * n


def raise_power(n, power):
    """Return n**power via repeated multiplication (power >= 0)."""
    result = 1
    for _ in range(power):
        result *= n
    return result


def is_divisible(n, t):
    """Return True when t divides n evenly."""
    return n % t == 0


def is_even(n):
    """Return True for even n."""
    return n % 2 == 0


def is_odd(n):
    """Return True for odd n."""
    return n % 2 != 0


print([squared(value) for value in [1, 3, 5, 7, 9, 11, 13, 15]])
print([raise_power(value, 3) for value in [1, 2, 5, 7, 9, 11, 13, 15]])
# for functions with multiple args
# see: https://www.quora.com/How-do-I-put-multiple-arguments-into-a-map-function-in-Python
# Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: def buildTree(self, inorder, postorder): """ :type inorder: List[int] :type postorder: List[int] :rtype: TreeNode """ if not postorder: return None node = postorder.pop() root = TreeNode(node) index = inorder.index(node) root.left = self.buildTree(inorder[:index], postorder[:index]) root.right = self.buildTree(inorder[index+1:], postorder[index:]) return root
# INTERACTIVE HELP
help(print)
print(input.__doc__)


# DOCSTRINGS
def contador(i, f, p):
    """
    -> Faz uma contagem e mostra na tela.
    :param i: início da contagem
    :param f: final da contagem
    :param p: passo da contagem
    :return: sem retorno
    """
    c = i
    while c <= f:
        print(c, end=' ')
        c += p
    print('FIM!')


help(contador)


# OPTIONAL PARAMETERS
def soma(a=0, b=0, c=0):
    soma = a + b + c
    print(f'A soma é igual a {soma}.')


soma(2, 1)  # Fewer arguments than parameters is fine thanks to the defaults.
soma()  # Even no arguments at all works.

# VARIABLE SCOPE
def funcao():
    n1 = 4  # local to funcao(); shadows the global n1
    print(f'n1 dentro vale {n1}.')


n1 = 2
funcao()
print(f'n1 global vale {n1}.')


def teste(b):
    global a  # makes `a` refer to the module-level name, so 8 replaces 2
    a = 8
    b += 4  # local (only inside teste)
    c = 2  # local (only inside teste)
    print(f'A dentro vale {a}.'
          f'\nB dentro vale {b}.'
          f'\nC dentro vale {c}.')


a = 2  # global variable
teste(a)
print(f'A fora vale {a}.')


# RETURNING VALUES
def soma(a=0, b=0, c=0):
    soma = a + b + c
    return soma  # returning lets each caller format the result as it wants


r1 = soma(1, 2, 5)
r2 = soma(2, 5)
r3 = soma(9)
print(f'A soma é igual a {r1}, {r2} e {r3}.')


# EXERCISE LESSON 20 - FUNCTIONS (PART 2)
def fatorial(num=1):
    f = 1
    for c in range(num, 0, -1):
        f *= c
    return f


n = int(input('Digite um número: '))
print(f'O fatorial de {n} é {fatorial(n)}.')


def par(num=0):
    # BUG FIX: test the parameter `num`, not the global `n`
    # (the original ignored its argument entirely).
    if num % 2 == 0:
        return True
    else:
        return False


n = int(input('Número: '))
print(par(n))

if par(n):
    print('É par')
else:
    print('Não é par')