python
os.chdir("./saved_models/") # os.chdir("./noise_sigma0.1_modified_workbook_finalized_env.py_DDPG/") # To resume a previously trained model: # trainer.train_rl(models_to_train=80, episodes_per_model=1000, last_model_number=39) # To train from scratch: trainer.train_rl(models_to_train=40, episodes_per_model=1000) os.chdir("../")
python
import math
from decimal import Decimal


def calc(n):
    # Chudnovsky series: 1/pi = (12 / 640320^1.5) * sum over k of
    #   (-1)^k (6k)! (13591409 + 545140134 k) / ((3k)! (k!)^3 640320^(3k)).
    # The original divided by the denominator twice (once inside t, once via
    # deno); here t holds only the numerator.
    pi = Decimal(0)
    for k in range(n):
        t = (Decimal(-1) ** k) * Decimal(math.factorial(6 * k)) * (13591409 + 545140134 * k)
        deno = math.factorial(3 * k) * (math.factorial(k) ** 3) * (640320 ** (3 * k))
        pi += t / Decimal(deno)
    pi = pi * Decimal(12) / (Decimal(640320).sqrt() * Decimal(640320))
    # The series converges to 1/pi, so invert before returning.
    return 1 / pi
python
GREEN = '\033[32m'
YELLOW = '\033[33m'
CYAN = '\033[36m'
GREY = '\033[90m'
RESET = '\033[0m'

MAX_FILENAME_LEN = 40


def _col(s, c):
    return '{}{}{}'.format(c, s, RESET)
python
        with self.assertRaises(ValueError):
            ResNetRS50(**invalid_parameters)

    def test_pretrained_model_can_be_created_with_different_input_shape(self):
        input_shape = (160, 160, 3)
        parameters = {
            "weights": "imagenet",
            "include_top": True,
            "input_shape": input_shape,
        }
        mock_input = self.rng.uniform((1, *input_shape))

        model = ResNetRS50(**parameters)
        output = model(mock_input, training=False)
python
switch_results = 0
not_switch_results = 0

for sim in range(1000):
    prizedoor, guess = prizedoor_and_guess()
    goat_door = show_goat_door(prizedoor, guess)
    if not_switch_policy(prizedoor, guess):
        not_switch_results += 1
    if switch_policy(prizedoor, guess, goat_door):
        switch_results += 1
python
609. Find Duplicate File in System
'''


class Solution:
    def findDuplicate(self, paths: List[str]) -> List[List[str]]:
        contents = collections.defaultdict(list)
        for path in paths:
            splitedpath = path.split()
            root = splitedpath[0]
            for file in splitedpath[1:]:
                name, content = file.split('(')
                contents[content].append('/'.join([root, name]))
        return [contents[key] for key in contents if len(contents[key]) > 1]
python
def dot6fdot(x):
    s = "%.6f" % (x,)
    if "." in s:
        return s
    return "." + s


def dot6fdot_list(l):
    s = ""
    for x in l:
        s += " " + dot6fdot(x)
    return s[1:]


def xtal6g(x):
    import string
    s = "%.6g" % (x,)
    i = string.find(s, "e")
python
        for pi in particles:
            self.channel_to_framework.append(self.code.particles.new_channel_to(pi))
            self.channel_from_framework.append(pi.new_channel_to(self.code.particles))

    @property
    def model_time(self):
        return self.code.model_time

    @property
    def stop(self):
        return self.code.stop
python
homepage = "http://python-blosc.blosc.org" url = "https://github.com/Blosc/python-blosc/archive/v1.9.1.tar.gz" git = "https://github.com/Blosc/python-blosc.git" version('1.9.1', sha256='ffc884439a12409aa4e8945e21dc920d6bc21807357c51d24c7f0a27ae4f79b9') depends_on('[email protected]:', type=('build', 'run'))
python
    is_palindrome = True  # 🤩

    return is_palindrome

    # Easy Way 😛
    # rvs_text = text[::-1]
    # if text == rvs_text:
    #     return True
python
    [4, 5, 3],
    [7, 5],
    [5, 3],
    [2, 4],
    [3, 5, 2],
    [3, 6],
    [3, 3],
python
DATADIR = 'test_data'
DATAFILE = 'data.json'


def dlfile(url, dest):
    try:
        f = urlopen(url)
        print "Test file not found locally, downloading: "
python
def CreateDictionaryFromCsv(inputFile):
    data = {}
    with open(inputFile, 'r') as f:
        # Create a csv reader object that uses the opened file.
        csv_reader = csv.reader(f)
        # Retrieve the header of the csv file. This is also used as the items that are present.
        csvHeader = next(csv_reader)
python
from lakefs_client.model.credentials_list import CredentialsList
from lakefs_client.model.credentials_with_secret import CredentialsWithSecret
from lakefs_client.model.current_user import CurrentUser
from lakefs_client.model.diff import Diff
from lakefs_client.model.diff_list import DiffList
from lakefs_client.model.error import Error
from lakefs_client.model.group import Group
from lakefs_client.model.group_creation import GroupCreation
python
    def ReadToMysql(self):  # Read the distance and store it in the database
        sqlStr = ("INSERT INTO learn_soil(sensor_code, is_soil, soil_value, create_time, status) "
                  "VALUES('{0}',{1},{2},'{3}',{4})".format(
                      '101', random.randint(0, 99), random.uniform(1, 99),
                      datetime.datetime.now(), 0))
        self.Context.SaveData(sqlStr)


sol = soil()
index = 1
while True:
    sol.ReadToMysql()
    print("index:" + str(index) + " time: " + str(datetime.datetime.now()))
    index = index + 1
    time.sleep(0.01)
python
speech_output = "Unknown command" reprompt_text = "Unknown command" if 'slots' in intent: if 'status' in intent['slots']: if 'value' in intent['slots']['status']: deviceStatus = intent['slots']['status']['value'].upper() speech_output = "Turning device " + deviceStatus
python
    if dog_detector(d):
        d_d_count += 1

d_d_ratio = d_d_count / len(dog_files_short)
d_h_ratio = d_h_count / len(human_files_short)
python
# Subset df to the given category
df_cat = df.query('category == @cat')

# Set up the parameters for the plotting grid
nrows = len(titles)
ncols = 1
figsize = (10, 12)
python
def get_modules(premodules):
    """premodules is a list of strings or a file-like object where each line
    is a module: the first value is the module name and subsequent values are
    observations in that module. All are separated by tabs.
    """
    modules = OrderedDict()
    for line in premodules:
        line = line.strip().split('\t')
python
@pytest.fixture(scope="module") async def ttftt_engine(): return await create_engine(sdl=_SDL, schema_name="test_issue82") @pytest.mark.asyncio async def test_issue82(ttftt_engine): query = """ query { viewer { name ...UndefinedFragment } }
python
speak("Running cowsay command.") obj.show_output() elif opt == 3: speak("Running cowsay command.") obj.show_output() elif opt == 4: LOOP = False class FunCommand: def sl(self): if not check_prog("sl"): if ask_install("sl"):
python
def run_migrations_offline():
    engine = interface.get_engine()
    connection = engine.connect()
    context.configure(connection=connection,
                      compare_type=True,
                      target_metadata=interface.schema.metadata,
                      sqlalchemy_module_prefix=None)

    with context.begin_transaction():
python
class TipoConquista(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    titulo = db.Column(db.String(128), nullable=False)
    descricao = db.Column(db.String(255))
    destaque = db.Column(db.Boolean, default=False)
    icone = db.Column(db.String(128), default=False)
    del_ = db.Column(db.Boolean, default=False)
python
        pulumi.set(__self__, "request_message", request_message)

    @property
    @pulumi.getter(name="groupIds")
    def group_ids(self) -> Optional[Sequence[str]]:
        """
        The ID(s) of the group(s) obtained from the remote resource that this
        private endpoint should connect to. Required on PUT (CreateOrUpdate) requests.
        """
        return pulumi.get(self, "group_ids")
python
# Calculate alternate Kappas (these are for Appendix)
# - Apply $\kappa$ calculations period by period
# - Do this for the pure TR data and pure scrape data

total_dft = beta_to_kappa(process_beta(f_betas_tr))
total_dfs = beta_to_kappa(process_beta(f_betas_sc))

final_df = pd.merge(total_dft, fix_scrape_cols(total_dfs),
                    on=['from', 'to', 'quarter'], how='outer')
python
    '''
    primes = []
    for i in range(n, m):
        if is_prime(i):
            primes.append(i)
    return primes


def main():
    # input
python
# https://atcoder.jp/contests/abc194/tasks/abc194_d
N = int(input())

ans = 0
for i in range(1, N):
    ans += N / i
print(ans)
python
    # breadcrumbs
    @property
    def zengoIdWorks(self) -> list[UserIllustInfo]:
        return [UserIllustInfo(item) for item in self._item["zengoIdWorks"]]
python
gain_combo = TTK.Combobox(window, width=4)
gain_combo['values'] = (1, 2, 4, 8, 16)
gain_combo.current(0)
gain_combo.grid(column=1, row=3)

# add checkboxes
plot_dOD_state = tk.BooleanVar()
plot_dOD_state.set(False)
# Checkbutton's keyword is `variable`, not `var`.
plot_dOD_chk = tk.Checkbutton(window, text='Plot delta OD', variable=plot_dOD_state)
plot_dOD_chk.grid(column=1, row=5)
python
from django.http import JsonResponse
from rest_framework.status import (
    HTTP_400_BAD_REQUEST,
    HTTP_201_CREATED
)
python
        self.assertEqual(top_dep_library_jar[0], 'scala-library-2.12.2.jar')
        self.assertEqual(top_dep_library_jar[1], '/home/hugemane/.ivy2/cache/org.scala-lang/scala-library/jars/scala-library-2.12.2.jar')
        self.assertEqual(top_dep_library_jar[2], '/home/artifact/repo/lib/org.scala-lang/scala-library/jars/scala-library-2.12.2.jar')

        self.assertEqual(last_dep_library_jar[0], 'akka-remote_2.12-2.5.6.jar')
        self.assertEqual(last_dep_library_jar[1], '/home/hugemane/.ivy2/cache/com.typesafe.akka/akka-remote_2.12/jars/akka-remote_2.12-2.5.6.jar')
        self.assertEqual(last_dep_library_jar[2], '/home/artifact/repo/lib/com.typesafe.akka/akka-remote_2.12/jars/akka-remote_2.12-2.5.6.jar')

    def test_generate_artifact_jar_library_dependency_file_jars_as_iterable_jar_dependencies_with_full_path(self):
        artifact_host = ArtifactHost('artifact', 'artifact.lxd')
        options = {
            'deploy_jar_lib_dir': '--some-remote-deployment-lib-dir--',
            'artifact_jar_lib_dep_file': '--some-artifact-jar-lib-dep-file--'
python
        k1 = str(letters[int(prediction1) + 1])
        k2 = str(letters[int(prediction2) + 1])

        if flag1:
            s1 += k1
        if flag2:
            s2 += k2

        flag1 = False
        flag2 = False
python
hm3 = hp.read_map('test_map.fits', dtype=float)
hm3 = maps.HealpixSkyMap(hm3)
assert hm1.compatible(hm3)
assert np.allclose(hm1, hm3)

print('Checking healpy.write_map')
os.remove('test_map.fits')
hp.write_map('test_map.fits', np.asarray(hm3), dtype=float)
hm4 = maps.fitsio.load_skymap_fits('test_map.fits')['T']
assert hm1.compatible(hm4)
python
queries_path = "../queries/project_creation_cypher.yml" project_creation_cypher = ckg_utils.get_queries(os.path.join(directory, queries_path)) except Exception as err: exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] logger.error("Reading queries from file {}: {}, file: {},line: {}, err: {}".format(queries_path, sys.exc_info(), fname, exc_tb.tb_lineno, err)) return project_creation_cypher
python
session_maker = sessionmaker(
    autocommit=False,
    autoflush=False,
    bind=engine,
    future=True
)


class KeyTypes(Enum):
    """Class describing the variation in user keys."""

    API = 0
python
                temp.iat[row_marker, column_marker] = column.get_text()
                column_marker += 1
            if len(columns) > 0:
                row_marker += 1

        # insert week number as field in dataframe
        temp['Week'] = week
        temp['Position'] = position

        for col in temp:
            try:
                temp[col] = temp[col].astype(float)
            except ValueError:
python
import re


class PuntuationCleaner(Cleaner):
    def __init__(self):
        spanish_punctuation = r"""!¡"#$%&'()*+,-./:;<=>¿?@[\]^_`{|}~¨´§«»¶\\"""
        self.__pattern = re.compile(f"[{spanish_punctuation}]")

    def clean(self, text: str) -> str:
        cleaned_text = self.__pattern.sub(" ", text)
        return cleaned_text
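A hedged usage sketch (assuming the `Cleaner` base class above is importable from the surrounding package; the expected output follows from the pattern, which turns each punctuation mark into a space):

# Each punctuation character is replaced by a single space.
cleaner = PuntuationCleaner()
print(cleaner.clean("¡Hola, mundo!"))  # -> " Hola  mundo "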
python
__credits__ = ["<NAME>", "<NAME>"] __license__ = "BSD-3" __version__ = "2021.5.7a" __maintainer__ = "<NAME>" __email__ = "<EMAIL>"
python
# def countNum(N, num):
#     count = 0
#     divNum = num
#     while N >= divNum:
#         count = count + (N // divNum)
python
num2 = num * 2
c1 = Counter(str(num))
c2 = Counter(str(num2))
if c1 == c2:
    print("Yes")
else:
    print("No")
python
        return doc

    def get_next_token(self):
        if self.rs:
            return self.rs.next_token
        if self._next_token:
            return self._next_token
        return None

    def set_next_token(self, token):
        self._next_token = token
python
# Create your views here.
from django.http import HttpResponse


# Home page
def index(request):
    return HttpResponse('Home page')
python
"""Artificial Base Module""" # Author: <NAME> -- <<EMAIL>> # License: MIT (c) 2016 from .state import State, GeneticState from .environment import Environment
python
        if games[i]['result_team1'] != '':
            expected.append(float(games[i]['probability_team1']))
            expected.append(float(games[i]['probability_team2']))
            actual.append(float(games[i]['result_team1']))
            actual.append(float(games[i]['result_team2']))

    mse = np.square(np.subtract(expected, actual)).mean()
    if mse < best[1]:
        best[0] = t
        best[1] = mse

print(f'Best t: {best[0]}')
print(f'Best MSE: {best[1]}')
python
    def test_create_token_missing_field(self):
        '''Test that email and password are required'''
        res = self.client.post(TOKEN_URL, {'ermail': 'unknown', 'password': ''})

        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
python
    StringValue(SHIPPING_GROUP,
                'TURN_AROUND_TIME',
                description=_('Turn around time'),
                help_text=_('Turn around time in hours. If declared here, this parameter '
                            'will overwrite the one defined in the merchant\'s profile'),
                default='24'),

    BooleanValue(SHIPPING_GROUP,
python
factoid = 'primary_interface'


def fact():
    '''Returns the primary interface of this Mac'''
    primary_interface = None

    net_config = SCDynamicStoreCreate(None, "net", None, None)
    try:
        states = SCDynamicStoreCopyValue(net_config, "State:/Network/Global/IPv4")
        primary_interface = states['PrimaryInterface']
    except TypeError:
        pass
python
        if i not in kmers:
            continue
        hits.append([i, kmers[i], distance(seed, kmers[i]), max(i - s, i - e)])

    # No hit
    if len(hits) == 0:
        return None, None, len(seed), None

    # For HPC hits
    if use_hpc and len(hits) > 1:
python
# show launch application window
start = StartDialog()
start.show()

# execute application
app.exec_()
python
    INPUT:

    :param theoretical_model: (TheoreticalSemivariogram)

    OUTPUT:

    :return: (bool) True if Theoretical Semivariogram has calculated params or False otherwise
    """

    # Test
    if (theoretical_model.nugget is None) or (theoretical_model.sill is None) or \
            (theoretical_model.range is None):
        raise ValueError('Nugget, sill or range of TheoreticalSemivariogram is not set. Please update '
python
import json
import logging

import aws_lambda_logging
import requests

log = logging.getLogger()


def lambda_handler(event, context):
    """Sample pure Lambda function
python
    container = os.path.dirname(f)

    # probe
    print("Considering:", f)
    deps = [l for l in open(f) if l.strip().startswith("#include")]
    deps = [l.replace("#include", "").strip() for l in deps]
    deps = [l.replace("\"", "") for l in deps if not l.startswith("<")]
python
    if options.check_chain:
        check_block_chain(db_env)

    if options.dump_blkindex:
        dump_blkindex_summary(db_env)

    if options.dump_transaction is not None:
        dump_transaction(db_dir, db_env, options.dump_transaction)

    if options.dump_block is not None:
        if len(options.dump_block) < 7:
            # Probably an integer...
python
import wave

from auto_editor.render.tsm.phasevocoder import phasevocoder
from auto_editor.timeline import Timeline
from auto_editor.utils.log import Log
from auto_editor.utils.progressbar import ProgressBar
from auto_editor.wavfile import read


def make_new_audio(
    t: int,
    temp: str,
python
    author_email='<EMAIL>',
    url='https://github.com/kubilus1/pyaskpass',
    install_requires=[
        'pyglet'
    ],
    scripts=['askpass']
)
python
from typing import List


class UnionFind:
    def __init__(self, N):
        self.node2par = {i: i for i in range(1, N + 1)}
        self.rank = {i: 0 for i in range(1, N + 1)}

    def find_par(self, x):
        # Path compression: point x directly at its root. The original
        # excerpt was cut off before returning the result.
        if self.node2par[x] != x:
            self.node2par[x] = self.find_par(self.node2par[x])
        return self.node2par[x]
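The excerpt cuts off before any merge operation, so here is a minimal union-by-rank sketch built on the same `node2par` and `rank` dicts; the method name `union` is an assumption, not part of the source:

    def union(self, x, y):
        # Hypothetical companion to find_par: attach the shallower root
        # under the deeper one, bumping rank on ties.
        rx, ry = self.find_par(x), self.find_par(y)
        if rx == ry:
            return
        if self.rank[rx] < self.rank[ry]:
            rx, ry = ry, rx
        self.node2par[ry] = rx
        if self.rank[rx] == self.rank[ry]:
            self.rank[rx] += 1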
python
_lossfunc = dml.losses.TripletMarginLoss(
    margin=configs.triplet.margin_euclidean,
    reducer=dml.reducers.ThresholdReducer(low=0.),
    distance=dml.distances.LpDistance(
        p=2, power=1, normalize_embeddings=True)
)
_miner = dml.miners.TripletMarginMiner(
    margin=configs.triplet.margin_euclidean,
    type_of_triplets='semihard')
python
        # Rebin and save newly created histogram and axis
        plot.histo[i] = plot.histo[i].Rebin(mergeNumberBins)

        # Set view range. Note that the number of bins has changed with the
        # rebinning, so the total number and the number of shift bins must be
        # corrected with / mergeNumberBins.
        plot.histo[i].GetXaxis().SetRange(
            int(numberOfBins / (2 * mergeNumberBins) - binShift / mergeNumberBins),
            int(numberOfBins / (2 * mergeNumberBins) + binShift / mergeNumberBins))

        # save copy
python
# strip and split
class Solution:
    def lengthOfLastWord(self, s: str) -> int:
        if len(s.strip()) == 0:
            return 0
        last_word = s[::-1].split()[0]
        return len(last_word)


# pointer (faster)
# time-O(n)
# space-O(1)
class Solution:
    def lengthOfLastWord(self, s: str) -> int:
python
from .fixtures import trees


class TestTextRenderer(TestCase):
    def setUp(self):
python
FOLDER = Path(__file__).parent
H5 = FOLDER / '_test.h5'


def test_write_LAMMPS_dump():
    try:
        traj = ParticleTrajectory(H5)
        frame = traj.frames[0]
        frame.array
        frame.dataframe
        output = FOLDER / 'lammps.test.dump'
        frame.to_LAMMPS_dump(output)
        output.unlink()
python
# Appending numbers in lists
for i in range(30):
    samples.append(i)
    linear.append(i)
    quad.append(i ** 2)
    cubic.append(i ** 3)
    exp.append(1.5 ** i)
python
@seeder
async def tag_seeder(context: blnt.BolinetteContext):
    if context.env['profile'] == 'development':
        tag_service: TagService = context.service('tag')
python
        CREATE TRIGGER trigger_delete_old_rows_manpower_group
        AFTER INSERT ON manpower_group
        EXECUTE PROCEDURE delete_old_rows_manpower_group();
    """)
    session.execute("""
        CREATE OR REPLACE FUNCTION delete_old_rows_manpower_individual() RETURNS TRIGGER
        LANGUAGE plpgsql
        AS $$
        BEGIN
            DELETE FROM manpower_individual WHERE created_at < NOW() - INTERVAL '1095 days';
            RETURN NULL;
        END;
        $$;
python
from ..decorators import nested_repeatables, named_as, repeat
from ..envs import EnvFactory

ef1_prod = EnvFactory()
prod = ef1_prod.Env('prod')


def ce(line_num, *lines):
    return config_error(__file__, line_num, *lines)


_access_undefined_attribute_expected_repr = """{
    "__class__": "ConfigRoot #as: 'ConfigRoot', id: 0000",
    "env": {
        "__class__": "Env",
python
from predict import text_to_emotion

st.title('Emotion classification from text')
st.write('Possible emotions are sadness, anger, love, surprise, fear and joy')

text = st.text_input('Enter a text', "I'm sad to be a data scientist")
python
logger.info(f"updated widget {self.widgetName} at: {datetime.datetime.now()}") def generateHtml(self) -> None: # render html with output data htmlList: list = [x["title"] for x in self.output][:self.config["displayNumberOfItems"]] html: str = self.listToHtml(htmlList)
python
name = "ntunnel_test"
python
        contents = [{'name': 'myname', 'segment_type': 'vlan'}]
        self._test_list_resources(resources, cmd, tags=['a', 'b'],
                                  response_contents=contents)

    def test_list_networkprofile_fields(self):
        """List networkprofile: --fields a --fields b -- --fields c d."""
        resources = 'network_profiles'
        cmd = np.NetworkProfileList(test_cli20.MyApp(sys.stdout), None)
        contents = [{'name': 'myname', 'segment_type': 'vlan'}]
        self._test_list_resources(resources, cmd,
                                  fields_1=['a', 'b'], fields_2=['c', 'd'],
                                  response_contents=contents)
python
from jet_django.models.view_settings import ViewSettings


class ViewSettingsFilterSet(django_filters.FilterSet):
    class Meta:
        model = ViewSettings
        fields = (
            'app_label',
            'model',
            'view',
python
        user.telegram_id = 1234
        user.user_id = 9876

        passed, alert = self.assert_params(user.firstname, "Newname")
        assert passed, alert
        passed, alert = self.assert_params(user.lastname, "Lastname1")
python
    para.append(runs)
    return para


def li(runs, style):
    """
    Returns a ``p`` (paragraph) with ordered or unordered list style for text runs.

    *runs* are a list of ``r`` (run) elements, see :func:`run`.

    *style*, a string naming a supported list style: ``circle``, ``number``,
    ``disc`` and ``square``.
python
    os.makedirs(FOLDER_OUT + 'POD', exist_ok=True)
    np.savez(FOLDER_OUT + '/POD/pod_spatial_basis',
             phis=Phi_P, PHI_P_SIGMA_P=PHI_P_SIGMA_P)
else:
python
def joincsv(prefix, out_filename='all_picks_phasenet.csv'):
    """Join into one csv the picks.csv files generated by sgc_phasenet.

    The script needs to be run in the general_data_dir.

    Parameters
python
    # not sure why this isn't working yet with lower=False
    out = tt.slinalg.Cholesky(lower=False)(x)
    out_fg = theano.gof.FunctionGraph([x], [out])
    compare_jax_and_py(
        out_fg, [(np.eye(10) + np.random.randn(10, 10) * 0.01).astype(tt.config.floatX)]
    )

    out = tt.slinalg.solve(x, b)
python
    click.echo(o)
    ctx.ensure_object(dict)
    ctx.obj["m"] = m
    ctx.obj["o"] = o
    return


cli.add_command(cmd1)

if __name__ == "__main__":
python
from . import pedestal_residuals
from . import saturation_recovery
from . import spe_spectrum
from . import spe_spectrum_checm
from . import spe_spectrum_comparison
from . import tf_generation
from . import tf_lookup
from . import charge_extraction_window
from . import annotated_waveform
from . import pulse_shape
from . import enf_spectrum_comparison
python
license="MIT", packages=["rocrand"], install_requires=["numpy"], test_suite="tests", command_options={ "build_sphinx": {
python
for i in range(len(palabras)):
    palabra = palabras[i].strip()
    # list.index raises ValueError on a miss (it never returns -1),
    # so guard with a membership test instead.
    if palabra in lista_idioma1:
        idx = lista_idioma1.index(palabra)
        palabras[i] = lista_idioma2[idx]
traducido.write(" ".join(palabras) + "\n")

original.close()
traducido.close()

lista_idioma1 = ["mi", "nombre", "es", "micaela"]
lista_idioma2 = ["my", "name", "is", "Micaela"]
python
    def setUp(self):
        super(WebhookControllerTest, self).setUp()

        class DummyConfig(object):
            bind_port = 8778

        cfgopts = DummyConfig()
        self.controller = webhooks.WebhookController(options=cfgopts)
python
        network.add_subnet(
            user=self.user_object,
            subnet='10.145.89.0/24'
        )
        network.del_subnet(
            1,
            user=self.user_object,
        )
        del_subnet = network.list_subnets(
            user=self.user_object
        )
python
file = sys.argv[1]
print file

json_data = open(file).read()
data = json.loads(json_data)

for a in data['addresses']:
    pubkeyArray = data['addresses'][a]['publicKey']['data']
    pubkey = ""
    for pbk in pubkeyArray:
        pubkey = "%s%02x" % (pubkey, pbk)
    print "Address:%s" % a
    print "Public Key:0x%s" % pubkey
    print "Private Key:0x%s" % data['private_keys'][a]
python
"Error in room_assignments.py - file " "write: " + repr(e), "Adirondack Error") pass # # # Remove this after testing - only for testing when no # # # recent changes are found via the API # # room_file = settings.ADIRONDACK_TXT_OUTPUT + \ # # settings.ADIRONDACK_ROOM_ASSIGNMENTS + '.csv' # # if run_mode == 'auto':
python
def vowel_count(phrase):
    """Return frequency map of vowels, case-insensitive.

    >>> vowel_count('rithm school')
    {'i': 1, 'o': 2}

    >>> vowel_count('HOW ARE YOU? i am great!')
    {'o': 2, 'a': 3, 'e': 2, 'u': 1, 'i': 1}
    """
    d = {}
    v = set('aeiou')
    for c in phrase.lower():
        if c in v:
            d[c] = d.get(c, 0) + 1
    return d
python
class FlameSensor(SensorBase):
    def __init__(self, thread_id, notification_queue, sleeptime, pin=27):
        super().__init__(thread_id, notification_queue, sleeptime)  # python 3 syntax only
        self.pin = pin
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
python
from .multimodal import *
python
    :param kernel_options: A dictionary with all options for the kernel.
    :type kernel_options: kernel_tuner.interface.Options

    :param device_options: A dictionary with all options for the device on
        which the kernel should be tuned.
python
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        if not nums:
            return None
        if len(nums) < 2:
            return None

        dict = {}
        for i, x in enumerate(nums):
            y = target - x
python
    def __init__(self, optimizer, epsilon=1e-05, hyperpara=0.001, weight_decay=0.0, use_clip=False,
                 decay_filter=lambda x: 'LayerNorm' not in x.name and 'bias' not in x.name,
                 lars_filter=lambda x: 'LayerNorm' not in x.name and 'bias' not in x.name,
                 loss_scale=1.0):
        super(LARS, self).__init__(0.0, [Parameter(Tensor(0.0), name="trivial")])
        self.opt = optimizer
        self.parameters = optimizer.parameters
        self.learning_rate = optimizer.learning_rate
        self.lars = P.LARSUpdate(epsilon, hyperpara, use_clip)
        self.reciprocal_scale = 1.0 / loss_scale
python
Benchmark.debug()

"""
Tests to be integrated,
python
def _write32(f, w):
    f.write(bytearray([
        (w >> 0) & 0xff,
        (w >> 8) & 0xff,
        (w >> 16) & 0xff,
        (w >> 24) & 0xff]))
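As a sanity check, the helper emits the same four little-endian bytes as `struct.pack`; a minimal sketch:

import io
import struct

buf = io.BytesIO()
_write32(buf, 0x12345678)
# '<I' is struct's little-endian unsigned 32-bit format.
assert buf.getvalue() == struct.pack('<I', 0x12345678)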
python
conf.x4.dmg = 129 / 100.0
conf.x4.sp = 360
conf.x4.startup = 0
conf.x4.recovery = 65 / 60.0
python
    datetime.datetime(2024, 1, 20, 13, 0),
    datetime.datetime(2024, 10, 26, 14, 0),
    datetime.datetime(2025, 1, 18, 13, 0),
    datetime.datetime(2025, 10, 25, 14, 0),
    datetime.datetime(2026, 1, 17, 13, 0),
    datetime.datetime(2026, 10, 24, 14, 0),
    datetime.datetime(2027, 1, 23, 13, 0),
    datetime.datetime(2027, 10, 23, 14, 0),
    datetime.datetime(2028, 1, 22, 13, 0),
    datetime.datetime(2028, 10, 21, 14, 0),
    datetime.datetime(2029, 1, 20, 13, 0),
    datetime.datetime(2029, 10, 20, 14, 0),
python
        self.cloud_admin_user = self.new_user_ref(
            domain_id=self.admin_domain['id'])
        password = uuid.uuid4().hex
        self.cloud_admin_user['password'] = password
        self.cloud_admin_user = (
            self.identity_api.create_user(self.cloud_admin_user))
        self.cloud_admin_user['password'] = password

        self.just_a_user = self.new_user_ref(domain_id=self.domainA['id'])
        password = uuid.uuid4().hex
        self.just_a_user['password'] = password
        self.just_a_user = self.identity_api.create_user(self.just_a_user)
        self.just_a_user['password'] = password
python
#         self.next = None


class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        len1, len2 = self.length(l1), self.length(l2)
        if len1 > len2:
            l2 = self.pad(l2, len1 - len2)
        else:
            l1 = self.pad(l1, len2 - len1)
python
    }
    transformed_history = sft.transform(history)
    np.testing.assert_array_equal(
        transformed_history["fitness"], [0.5, 1.5])
    np.testing.assert_array_equal(
        transformed_history["parameters"], np.array([[1, 2], [3, 4]])
    )
python
# Test function
def test(arg):
    print(arg * arg)
python
def queue_pre_commit(func, key=None):
    """
    Queues a function to call before the transaction is committed.

    Use a key when you want to ensure that an action won't get triggered
    multiple times. E.g. you might want to queue this function
    (lambda: validate_account(123)) multiple times, but it only makes sense
    to run it once just before a transaction is committed. In this case, you
    could use the key 'validate_account.123' to ensure it only runs once.
    """
    pre_commit_function_pool.queue(func, key=key)
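A short usage sketch of the keyed deduplication described in the docstring, reusing its own hypothetical `validate_account` example:

# Queued three times under one key, the callback runs once at commit time;
# without a key, each call would queue a separate invocation.
for _ in range(3):
    queue_pre_commit(lambda: validate_account(123), key='validate_account.123')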
python
print(r.text)
data = json.loads(r.text)
importerId = data["import"]["id"]
print "...importer successfully created! importerId:'" + str(importerId) + "'"
print ""

print "STEP 2 - Going to load from filesystem the geotif to upload..."
upload = {'files': ('country.tiff', open(granule_abs_path, "rb"), 'application/octet-stream')}
print "...geotif successfully loaded! ready to create and run a task for the importer " + str(importerId) + "..."
url += "/" + str(importerId) + "/tasks"
python
from random import randint
from typing import Tuple


def select_partition(length: int, min_size: int = 1, max_size: int = None) -> Tuple[int, int]:
    """Select a partition of a chromosome.

    :param length: Length of the chromosome.
    :param min_size: Minimum length of the partition. Defaults to 1.
    :param max_size: Maximum length of the partition. Defaults to length - 1.
    :return: Start and end index of the partition.
    """
    partition_size = randint(min_size, length - 1 if max_size is None else max_size)
    partition_start = randint(0, length - partition_size)
    # Assumed completion of the truncated excerpt, per the docstring's return spec.
    return partition_start, partition_start + partition_size
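A quick usage sketch of the completed function (the final return line above is an assumed completion, so the shape of the result follows from that reading of the docstring):

from random import seed

seed(42)
start, end = select_partition(length=10, min_size=2, max_size=5)
# The partition length stays within [min_size, max_size] and inside the chromosome.
assert 2 <= end - start <= 5 and 0 <= start <= end <= 10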